| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        """Configuration for a backbone loaded through the `timm` library."""
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
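A minimal usage sketch for the config class above (attribute names follow the restored parameters; the backbone string is illustrative):

# Hypothetical usage: construct the config and read back its fields.
config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.backbone, config.use_timm_backbone)  # resnet50 True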
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Convert a RoBERTa-PreLayerNorm checkpoint into the Transformers format."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
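For reference, a hedged direct call of the converter above (the repo id comes from the help text; the output path is illustrative):

# convert_roberta_prelayernorm_checkpoint_to_pytorch(
#     "andreasmadsen/efficient_mlm_m0.40", "./roberta_prelayernorm_dump"
# )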
| 53
| 1
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into PyTorch weight, config and vocab files."""
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
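Likewise, a hedged direct call of this converter (both paths are illustrative):

# convert_xlm_checkpoint_to_pytorch("./xlm_mlm_checkpoint.pth", "./xlm_dump")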
| 53
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
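The kernel above implements the standard Gabor function G(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambda + psi), where (x', y') are the rotated coordinates. A quick sanity check of the closed form (illustrative, not from the source):

# The center entry has x' = y' = 0, so it equals exp(0) * cos(psi) = 1.0 when psi = 0.
center = gabor_filter_kernel(9, 8, 0, 10, 0, 0)[4, 4]
assert abs(center - 1.0) < 1e-9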
| 53
| 1
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
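The `InitializationArguments` container lives in a separate `arguments.py` not shown here; a plausible minimal shape, inferred only from the attributes the script reads above (an assumption, not the actual file):

# Hypothetical sketch of arguments.py; field names mirror the attributes used above,
# defaults are illustrative guesses.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(default="gpt2-large")
    tokenizer_name: Optional[str] = field(default="codeparrot/codeparrot")
    model_name: Optional[str] = field(default="codeparrot/codeparrot")
    push_to_hub: Optional[bool] = field(default=False)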
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
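A hedged, non-CLI use of the helpers above (the directory name is illustrative; `from_gh` must be bound at module level, as the `__main__` block does):

# warnings_found = extract_warnings("ci_artifacts", ["DeprecationWarning"])
# print(sorted(warnings_found))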
| 53
| 1
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
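A short sketch of the character-level behavior of the tokenizer above (the vocab path is illustrative):

# Each input character becomes one token; characters missing from the vocab map to unk_token.
# tok = MgpstrTokenizer(vocab_file="vocab.json")
# tok._tokenize("abc")  # -> ["a", "b", "c"]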
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 53
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
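Worked example for the call above: "hello" and "world" share a single matched character ("l"), so m = 1 and t = 0, giving jaro = (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667; there is no common prefix, so the Winkler boost is zero.

# jaro_winkler("hello", "world") == 0.4666666666666666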
| 53
|
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither entirely non-decreasing nor non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
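Worked examples for the two functions above: 538 is bouncy (sorted digits give 358, reverse-sorted give 853, neither equals 538), while 134468 is non-decreasing and therefore not bouncy. Per Project Euler problem 112, solution(99) returns 1587000.

# check_bouncy(538)     -> True
# check_bouncy(134468)  -> False
# solution(99)          -> 1587000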
| 53
| 1
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(
                                        neighbours, key(neighbours, var, goal, g_function)
                                    )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # top_show() returns a single position, so no tuple unpacking here
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 53
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 53
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the conversion on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a DINO-trained ViT checkpoint into the Transformers ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
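A hedged command-line invocation (the script filename is assumed, not given in this snippet):

# python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16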
| 53
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 53
| 1
|
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 53
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
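# The obstacle set built above (as (x, y) cells on the 20 x 20 grid): a 4 x 5
# block near the start, a short horizontal wall at y == 17, a large 9 x 14 block
# in the middle of the grid, and an L-shaped block along the left edge.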
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3  # one consistent and two inconsistent heuristics
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
                    __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__snake_case :str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
__snake_case :ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
__snake_case :ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
__snake_case :str = "question"
__snake_case :str = "context"
__snake_case :str = "answers"
@property
def _a ( self : Optional[int] ) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
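# Illustrative note on the task template above: with the default column names,
# the property at the end returns
#     {"question": "question", "context": "context", "answers": "answers"},
# the mapping the datasets library can use to rename dataset columns to the
# canonical extractive-QA schema.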
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
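# Minimal sketch of rename_key in isolation (the tensor value is hypothetical):
# >>> sd = {"backbone.patch_embed.norm.bias": torch.zeros(96)}
# >>> rename_key(sd, "backbone.patch_embed.norm.bias",
# ...            "model.pixel_level_module.encoder.model.embeddings.norm.bias")
# >>> list(sd)
# ['model.pixel_level_module.encoder.model.embeddings.norm.bias']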
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
            # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
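# The slicing above splits the fused Swin qkv projection, whose weight has shape
# (3 * dim, dim), into three (dim, dim) blocks: rows [:dim] become the query
# projection, rows [dim : 2 * dim] the key projection, and rows [2 * dim :] the
# value projection (the bias is split the same way).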
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = True if """ade""" in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCamelCase : Optional[Any] = 16
__UpperCamelCase : Tuple = 32
def snake_case ( lowerCamelCase , lowerCamelCase = 16 ):
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowercase = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowercase = 16
elif accelerator.mixed_precision != "no":
__lowercase = 8
else:
__lowercase = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
__lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
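# Rough sanity check (illustrative): GLUE MRPC has 3,668 training examples, so
# with the default batch_size of 16 the train_dataloader above yields about 230
# batches per epoch (the last batch is smaller).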
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCamelCase : Optional[int] = mocked_dataloaders # noqa: F811
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase ) == "1":
__lowercase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__lowercase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
__lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
set_seed(lowerCamelCase )
__lowercase , __lowercase = get_dataloaders(lowerCamelCase , lowerCamelCase )
__lowercase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__lowercase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowercase = batch_size // MAX_GPU_BATCH_SIZE
__lowercase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowercase = model.to(accelerator.device )
# Instantiate optimizer
__lowercase = AdamW(params=model.parameters() , lr=lowerCamelCase )
# Instantiate scheduler
__lowercase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__lowercase = os.path.split(lowerCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(lowerCamelCase , lowerCamelCase )
# Now we train the model
for epoch in range(lowerCamelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__lowercase = 0
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.argmax(dim=-1 )
__lowercase , __lowercase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
__lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(lowerCamelCase ),
"""epoch""": epoch,
} , step=lowerCamelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def snake_case ( ):
'''simple docstring'''
__lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=lowerCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
__lowercase = parser.parse_args()
__lowercase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
main()
| 53
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
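# Illustrative doctest-style checks for is_prime:
# >>> is_prime(29)
# True
# >>> is_prime(25)   # 25 == 5 * 5 is caught by the 6k +/- 1 loop
# False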
def snake_case ( lowerCamelCase = 10_001 ):
'''simple docstring'''
__lowercase = 0
__lowercase = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase ):
count += 1
return number
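# Illustrative values: solution(6) returns 13 (the sixth prime), and the default
# solution() returns 104743, the 10,001st prime.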
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
| 1
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = True ):
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__lowercase = timm.create_model("""levit_128s""" , pretrained=lowerCamelCase )
else:
__lowercase = timm.create_model("""levit_128""" , pretrained=lowerCamelCase )
if hidden_sizes == 192:
__lowercase = timm.create_model("""levit_192""" , pretrained=lowerCamelCase )
if hidden_sizes == 256:
__lowercase = timm.create_model("""levit_256""" , pretrained=lowerCamelCase )
if hidden_sizes == 384:
__lowercase = timm.create_model("""levit_384""" , pretrained=lowerCamelCase )
from_model.eval()
__lowercase = LevitForImageClassificationWithTeacher(lowerCamelCase ).eval()
__lowercase = OrderedDict()
__lowercase = from_model.state_dict()
__lowercase = list(from_model.state_dict().keys() )
__lowercase = list(our_model.state_dict().keys() )
print(len(lowerCamelCase ) , len(lowerCamelCase ) )
for i in range(len(lowerCamelCase ) ):
__lowercase = weights[og_keys[i]]
our_model.load_state_dict(lowerCamelCase )
__lowercase = torch.randn((2, 3, 224, 224) )
__lowercase = from_model(lowerCamelCase )
__lowercase = our_model(lowerCamelCase ).logits
assert torch.allclose(lowerCamelCase , lowerCamelCase ), "The model logits don't match the original one."
__lowercase = name
print(lowerCamelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__lowercase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True ):
'''simple docstring'''
__lowercase = """imagenet-1k-id2label.json"""
__lowercase = 1_000
__lowercase = (1, num_labels)
__lowercase = """huggingface/label-files"""
__lowercase = num_labels
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = partial(lowerCamelCase , num_labels=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase )
__lowercase = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
__lowercase = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , lowerCamelCase , names_to_config[model_name] , lowerCamelCase , lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
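# Illustrative behavior of the helper above (call sites below use the name
# to_atuple): to_atuple(7) returns (7, 7), while an iterable such as (7, 14)
# is returned unchanged.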
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 1
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(_lowerCAmelCase ) - 1
def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# The basis functions must sum to 1 for the curve to be a valid Bezier curve.
assert round(sum(_lowerCAmelCase ) , 5 ) == 1
return output_values
def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(_lowerCAmelCase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of the i-th basis function and the i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(_lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
_lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
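# Standalone check of the Bernstein form used above (illustrative sketch;
# bernstein_point is a hypothetical helper, independent of the class). For the
# degree-2 control points (0,0), (5,5), (5,0), the point at t = 0.5 is
# B(t) = (1-t)^2 P0 + 2t(1-t) P1 + t^2 P2 = (3.75, 2.5).
from scipy.special import comb

def bernstein_point(points, t):
    n = len(points) - 1
    bases = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
    x = sum(b * p[0] for b, p in zip(bases, points))
    y = sum(b * p[1] for b, p in zip(bases, points))
    return (x, y)

px, py = bernstein_point([(0, 0), (5, 5), (5, 0)], 0.5)
assert abs(px - 3.75) < 1e-9 and abs(py - 2.5) < 1e-9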
| 53
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Dict = ShapEPipeline
__snake_case :Dict = ['prompt']
__snake_case :Union[str, Any] = ['prompt']
__snake_case :Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__snake_case :str = False
@property
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return 32
@property
def _a ( self : Any ) -> Dict:
"""simple docstring"""
return 32
@property
def _a ( self : int ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
return 8
@property
def _a ( self : str ) -> Any:
"""simple docstring"""
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def _a ( self : Any ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowercase = PriorTransformer(**_lowerCAmelCase )
return model
@property
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowercase = ShapERenderer(**_lowerCAmelCase )
return model
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.dummy_prior
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_renderer
__lowercase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
__lowercase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a ( self : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=0 ) -> List[Any]:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = """cpu"""
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCAmelCase )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
__lowercase = output.images[0]
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowercase = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase = torch_device == """cpu"""
__lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCAmelCase )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = 1
__lowercase = 2
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__lowercase = batch_size * [inputs[key]]
__lowercase = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__lowercase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
__lowercase = pipe(
"""a shark""" , generator=_lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 53
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
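# Minimal end-to-end sketch mirroring the integration tests above (the
# checkpoint name and fixture path are taken from the tests; everything else
# is standard Transformers usage, shown here only as an illustration):
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

sketch_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
sketch_model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
sketch_inputs = sketch_processor(
    images=Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), return_tensors="tf"
)
sketch_logits = sketch_model(**sketch_inputs, training=False).logits  # shape (1, 1000)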
| 53
| 1
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case ( lowerCamelCase = 10_001 ):
'''simple docstring'''
__lowercase = 0
__lowercase = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase ):
count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
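# Standalone sketch of the 6k +/- 1 idea used above (illustrative only;
# is_prime_6k is a hypothetical name, not part of the solution). Every prime
# greater than 3 is 6k - 1 or 6k + 1, since 6k, 6k + 2, 6k + 4 are even and
# 6k + 3 is divisible by 3, so trial division only needs to test i and i + 2
# for i = 5, 11, 17, ...
def is_prime_6k(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert [p for p in range(30) if is_prime_6k(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]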
| 53
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
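# Illustrative example of the rewrite above (hypothetical entries): a
# fairseq-style BPE vocab {"hell@@": 7, "o": 8} becomes {"hell": 7, "o</w>": 8},
# i.e. the "@@" continuation marker is dropped and word-final tokens gain
# "</w>", while the four special tokens <s> <pad> </s> <unk> are restored
# unchanged by the keep_keys loop.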
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
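# Example invocation (the script name and both paths are placeholders; the
# checkpoint directory must contain checkpoint.pt, dict.txt and bpecodes, as
# validated above):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/fairseq_biogpt \
#       --pytorch_dump_folder_path /path/to/hf_biogpt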
| 53
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __UpperCamelCase :
def __init__( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Dict=[1, 2, 1] , _lowerCAmelCase : Optional[int]=[2, 2, 4] , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[str]=1e-5 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=True , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : str=8 , _lowerCAmelCase : str=["stage1", "stage2", "stage3"] , _lowerCAmelCase : List[str]=[1, 2, 3] , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCAmelCase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__snake_case :Union[str, Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__snake_case :str = False
__snake_case :int = False
__snake_case :Tuple = False
__snake_case :Tuple = False
__snake_case :Optional[Any] = False
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
pass
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCAmelCase : Optional[int] ):
t[t != t] = 0  # NaN != NaN, so this zeroes NaN entries in place before comparison
return t
def check_equivalence(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int={} ):
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(_lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
F' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has'
F' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
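# Note on the equivalence check above: Transformers model outputs are
# dataclass-like ModelOutput objects that can also be consumed as tuples, so
# `return_dict=True` and `return_dict=False` must expose the same tensors;
# the recursive comparison zeroes out NaNs first so torch.allclose stays
# well-defined.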
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
__snake_case :Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__snake_case :Any = MaskFormerSwinConfig
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(_lowerCAmelCase )
backbone.to(_lowerCAmelCase )
backbone.eval()
__lowercase = backbone(**_lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 53
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
# the precomputed prompt_embeds path must reproduce the plain-prompt result
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
# passing precomputed (negative_)prompt_embeds must match the text-prompt path
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
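# The callback below fires once per denoising step (step 0 through the final step),
# verifying latent statistics at steps 0 and 5 and counting total invocations —
# hence the assertion of 6 calls for num_inference_steps=5.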
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 1
|
from functools import lru_cache
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = 2
__lowercase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(lowerCamelCase )
if n > 1:
factors.add(lowerCamelCase )
return factors
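# The counter below is memoized with lru_cache: the sliding window in the search
# re-queries overlapping integers, so caching avoids factoring the same number twice.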
@lru_cache
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return len(unique_prime_factors(lowerCamelCase ) )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return len(set(lowerCamelCase ) ) in (0, 1)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
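# Search sketch (Project Euler 47): slide a window of n consecutive integers upward
# from 2 until every member has exactly n distinct prime factors; appending the
# target n to the checker list below is what enforces the exact count.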
__lowercase = 2
while True:
# Increment each value of a generated range
__lowercase = [base + i for i in range(lowerCamelCase )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowercase = [upf_len(x ) for x in group]
checker.append(lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def snake_case ( lowerCamelCase = 4 ):
'''simple docstring'''
__lowercase = run(lowerCamelCase )
return results[0] if len(lowerCamelCase ) else None
if __name__ == "__main__":
print(solution())
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
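# Worked example (computed by hand from the logic above): with keyword "marvin" the
# map begins A->M, B->A, C->R, D->V, E->I, F->N; the remaining plaintext letters then
# continue through the alphabet from A, skipping letters the keyword already consumed
# (so G->B, H->C, ...).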
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
| 1
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__UpperCamelCase : List[str] = get_logger(__name__)
class __UpperCamelCase :
def __init__( self : List[str] , _lowerCAmelCase : Optional[str] = None ) -> Tuple:
"""simple docstring"""
__lowercase = (
os.path.join(_lowerCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowercase = Extractor
def _a ( self : str , _lowerCAmelCase : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
__lowercase = os.path.abspath(_lowerCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_lowerCAmelCase ) )
def _a ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(_lowerCAmelCase ) and not (os.path.isdir(_lowerCAmelCase ) and os.listdir(_lowerCAmelCase ))
)
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> str:
"""simple docstring"""
__lowercase = self.extractor.infer_extractor_format(_lowerCAmelCase )
if not extractor_format:
return input_path
__lowercase = self._get_output_path(_lowerCAmelCase )
if self._do_extract(_lowerCAmelCase , _lowerCAmelCase ):
self.extractor.extract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return output_path
class __UpperCamelCase ( _lowerCAmelCase ):
@classmethod
@abstractmethod
def _a ( cls : List[str] , _lowerCAmelCase : Union[Path, str] , **_lowerCAmelCase : str ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
...
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :List[bytes] = []
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
with open(_lowerCAmelCase , """rb""" ) as f:
return f.read(_lowerCAmelCase )
@classmethod
def _a ( cls : Any , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
__lowercase = max(len(_lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
try:
__lowercase = cls.read_magic_number(_lowerCAmelCase , _lowerCAmelCase )
except OSError:
return False
return any(magic_number.startswith(_lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
class __UpperCamelCase ( _lowerCAmelCase ):
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Union[Path, str] , **_lowerCAmelCase : str ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(_lowerCAmelCase )
@staticmethod
def _a ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
def resolved(_lowerCAmelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_lowerCAmelCase ) )
def badpath(_lowerCAmelCase : str , _lowerCAmelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ).startswith(_lowerCAmelCase )
def badlink(_lowerCAmelCase : Any , _lowerCAmelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowercase = resolved(os.path.join(_lowerCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_lowerCAmelCase )
__lowercase = resolved(_lowerCAmelCase )
for finfo in members:
if badpath(finfo.name , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(_lowerCAmelCase , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(_lowerCAmelCase , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowercase = tarfile.open(_lowerCAmelCase )
tar_file.extractall(_lowerCAmelCase , members=TarExtractor.safemembers(_lowerCAmelCase , _lowerCAmelCase ) )
tar_file.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[Any] = [B'\x1F\x8B']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(_lowerCAmelCase , """rb""" ) as gzip_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Dict = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def _a ( cls : Optional[Any] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_lowerCAmelCase , """rb""" ) as fp:
__lowercase = _EndRecData(_lowerCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowercase = fp.read(_lowerCAmelCase ) # CD is where we expect it to be
if len(_lowerCAmelCase ) == sizeCentralDir:
__lowercase = struct.unpack(_lowerCAmelCase , _lowerCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with zipfile.ZipFile(_lowerCAmelCase , """r""" ) as zip_file:
zip_file.extractall(_lowerCAmelCase )
zip_file.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(_lowerCAmelCase ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowercase = rarfile.RarFile(_lowerCAmelCase )
rf.extractall(_lowerCAmelCase )
rf.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Dict = [B'\x28\xb5\x2F\xFD']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__lowercase = zstd.ZstdDecompressor()
with open(_lowerCAmelCase , """rb""" ) as ifh, open(_lowerCAmelCase , """wb""" ) as ofh:
dctx.copy_stream(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[Any] = [B'\x42\x5A\x68']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with bz2.open(_lowerCAmelCase , """rb""" ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with py7zr.SevenZipFile(_lowerCAmelCase , """r""" ) as archive:
archive.extractall(_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = [B'\x04\x22\x4D\x18']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(_lowerCAmelCase , """rb""" ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase :
# Put zip last, because files can be wrongly detected as zip (when they are actually e.g. tar or gzip)
__snake_case :Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _a ( cls : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return max(
len(_lowerCAmelCase )
for extractor in cls.extractors.values()
if issubclass(_lowerCAmelCase , _lowerCAmelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(_lowerCAmelCase , magic_number_length=_lowerCAmelCase )
except OSError:
return b""
@classmethod
def _a ( cls : str , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=_lowerCAmelCase , )
__lowercase = cls.infer_extractor_format(_lowerCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _a ( cls : Optional[int] , _lowerCAmelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
__lowercase = cls._get_magic_number_max_length()
__lowercase = cls._read_magic_number(_lowerCAmelCase , _lowerCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase ):
return extractor_format
@classmethod
def _a ( cls : List[str] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(_lowerCAmelCase ) , exist_ok=_lowerCAmelCase )
# Prevent parallel extractions
__lowercase = str(Path(_lowerCAmelCase ).with_suffix(""".lock""" ) )
with FileLock(_lowerCAmelCase ):
shutil.rmtree(_lowerCAmelCase , ignore_errors=_lowerCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_lowerCAmelCase , _lowerCAmelCase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=_lowerCAmelCase , )
__lowercase = extractor if extractor != """deprecated""" else extractor_format
else:
__lowercase = cls.extractors[extractor_format]
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=_lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_lowerCAmelCase ):
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase )
| 53
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_float16(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 1
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : Union[str, Any] = logging.getLogger()
__UpperCamelCase : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : int , _lowerCAmelCase : Dict ) -> Dict:
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowercase = {"""source""": """What is love ?""", """target""": """life"""}
__lowercase = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__lowercase = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(_lowerCAmelCase , F'{split}.{field}' ) , """w""" ) as f:
f.write(_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : str = "pytorch" ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = os.path.join(_lowerCAmelCase , """output""" )
__lowercase = os.path.join(_lowerCAmelCase , """data""" )
self._create_dummy_data(data_dir=_lowerCAmelCase )
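# Build the CLI for finetune_rag, run it as a subprocess on the tiny dummy dataset,
# and read the resulting metrics.json back in.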
__lowercase = F'\n --data_dir {data_dir} \n --output_dir {output_dir} \n --model_name_or_path facebook/rag-sequence-base \n --model_type rag_sequence \n --do_train \n --do_predict \n --n_val -1 \n --val_check_interval 1.0 \n --train_batch_size 2 \n --eval_batch_size 1 \n --max_source_length 25 \n --max_target_length 25 \n --val_max_target_length 25 \n --test_max_target_length 25 \n --label_smoothing 0.1 \n --dropout 0.1 \n --attention_dropout 0.1 \n --weight_decay 0.001 \n --adam_epsilon 1e-08 \n --max_grad_norm 0.1 \n --lr_scheduler polynomial \n --learning_rate 3e-04 \n --num_train_epochs 1 \n --warmup_steps 4 \n --gradient_accumulation_steps 1 \n --distributed-port 8787 \n --use_dummy_dataset 1 \n --distributed_retriever {distributed_retriever} \n '.split()
if gpus > 0:
testargs.append(F'--gpus={gpus}' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
__lowercase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_lowerCAmelCase , env=self.get_env() )
__lowercase = os.path.join(_lowerCAmelCase , """metrics.json""" )
with open(_lowerCAmelCase ) as f:
__lowercase = json.load(_lowerCAmelCase )
return result
@require_torch_gpu
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 53
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
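# On a shortened 25-step schedule the gap between timesteps is not 1, so the next
# timestep is passed explicitly to the scheduler (None on the final step).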
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def snake_case ( lowerCamelCase ):
'''simple docstring'''
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
__lowercase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__lowercase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
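# Derivation: the unit circle inscribed in the [-1, 1] x [-1, 1] square gives
# area(circle) / area(square) = pi * 1**2 / (2 * 2) = pi / 4,
# hence pi is approximately 4 * proportion.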
__lowercase = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The math module value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
'''simple docstring'''
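# Monte Carlo integration: for U ~ Uniform(min_value, max_value),
# E[f(U)] * (max_value - min_value) equals the integral of f over the interval,
# so the sample mean below is an unbiased estimate of it.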
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def snake_case ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
'''simple docstring'''
def identity_function(lowerCamelCase ) -> float:
return x
__lowercase = area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
__lowercase = area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
| 53
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=3 , _lowerCAmelCase : int=32 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Any=10 , _lowerCAmelCase : Union[str, Any]=[10, 20, 30, 40] , _lowerCAmelCase : List[str]=[1, 1, 2, 1] , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]="relu" , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=None , ) -> List[str]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = embeddings_size
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = scope
__lowercase = len(_lowerCAmelCase )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = self.get_config()
return config, pixel_values
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRegNetModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : int ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = FlaxRegNetForImageClassification(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__snake_case :Tuple = False
__snake_case :List[Any] = False
__snake_case :Dict = False
def _a ( self : Any ) -> None:
"""simple docstring"""
__lowercase = FlaxRegNetModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def _a ( self : int ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
return
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _a ( self : List[str] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = model_class(_lowerCAmelCase )
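# Compare a jax.jit-compiled forward pass against an eager one; the output
# shapes should agree for every returned tensor.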
@jax.jit
def model_jitted(_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[int] ):
return model(pixel_values=_lowerCAmelCase , **_lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
__lowercase = model_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = model_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = (1, 1000)
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word masking.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
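# Dataset selection sketch: line-by-line datasets treat each input line as one
# sample (optionally with a whole-word-mask reference file for Chinese), while the
# default TextDataset concatenates the text and chunks it into block_size windows.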
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
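# Pick the collator matching the training objective: permutation language modeling
# for XLNet, whole-word masking when both --mlm and --whole_word_mask are set,
# otherwise plain (masked) language modeling collation.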
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
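# Perplexity is exp of the mean cross-entropy loss on the evaluation set.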
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
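# Note on the eval metric above: perplexity is computed as exp(eval_loss), i.e.
# the exponential of the mean cross-entropy, the standard intrinsic metric for
# language models (lower is better).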
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case ( lowerCamelCase=None ):
'''simple docstring'''
if subparsers is not None:
__lowercase = subparsers.add_parser("""env""" )
else:
__lowercase = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=lowerCamelCase , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase )
return parser
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = torch.__version__
__lowercase = torch.cuda.is_available()
__lowercase = is_xpu_available()
__lowercase = is_npu_available()
__lowercase = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowerCamelCase ):
__lowercase = load_config_from_file(args.config_file ).to_dict()
__lowercase = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""PyTorch XPU available""": str(lowerCamelCase ),
"""PyTorch NPU available""": str(lowerCamelCase ),
"""System RAM""": F'{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB',
}
if pt_cuda_available:
__lowercase = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([F'- {prop}: {val}' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
__lowercase = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(lowerCamelCase , lowerCamelCase )
else F'\t{accelerate_config}'
)
print(lowerCamelCase )
__lowercase = accelerate_config
return info
def snake_case ( ):
'''simple docstring'''
__lowercase = env_command_parser()
__lowercase = parser.parse_args()
env_command(lowerCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
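# Worked example of the check above: sides [3, 4, 5] can form a polygon because
# the longest side is smaller than the sum of the rest (5 < 3 + 4), while
# [1, 1, 3] cannot (3 >= 1 + 1).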
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
def snake_case ( ):
'''simple docstring'''
__lowercase = os.path.dirname(os.path.realpath(lowerCamelCase ) )
__lowercase = os.path.join(lowerCamelCase , """triangle.txt""" )
with open(lowerCamelCase ) as f:
__lowercase = f.readlines()
__lowercase = []
for line in triangle:
__lowercase = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowerCamelCase ) )
a.append(lowerCamelCase )
for i in range(1 , len(lowerCamelCase ) ):
for j in range(len(a[i] ) ):
__lowercase = a[i - 1][j] if j != len(a[i - 1] ) else 0
__lowercase = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase , lowerCamelCase )
return max(a[-1] )
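# Bottom-up dynamic programming (Project Euler 18/67 style): each cell is
# incremented by the larger of its two parents in the previous row, so after the
# loop the last row holds the best path sums and max(a[-1]) is the answer.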
if __name__ == "__main__":
print(solution())
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
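# "House robber" recurrence over non-adjacent elements: for each number, the
# best sum that includes it is the previous "excluding" state plus the number,
# and the best sum that excludes it is the max of the two previous states;
# e.g. for [2, 7, 9, 3, 1] the maximum non-adjacent sum is 12 (2 + 9 + 1).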
if __name__ == "__main__":
import doctest
doctest.testmod()
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__UpperCamelCase : Optional[int] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__UpperCamelCase : List[str] = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__UpperCamelCase : Optional[int] = """|""".join(sys.argv[1:])
__UpperCamelCase : Union[str, Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__UpperCamelCase : Tuple = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
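# Example invocation (the script filename and output path below are assumptions,
# only the checkpoint repo comes from the help text above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted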
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : int , _lowerCAmelCase : GenericTensor ) -> np.ndarray:
"""simple docstring"""
if self.framework == "tf":
__lowercase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__lowercase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def _a ( self : List[str] , _lowerCAmelCase : GenericTensor ) -> np.ndarray:
"""simple docstring"""
__lowercase = self.get_masked_index(_lowerCAmelCase )
__lowercase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def _a ( self : List[Any] , _lowerCAmelCase : GenericTensor ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCAmelCase )
def _a ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : List[str] ) -> Dict[str, GenericTensor]:
"""simple docstring"""
if return_tensors is None:
__lowercase = self.framework
__lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.ensure_exactly_one_mask_token(_lowerCAmelCase )
return model_inputs
def _a ( self : List[Any] , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
__lowercase = self.model(**_lowerCAmelCase )
__lowercase = model_inputs["""input_ids"""]
return model_outputs
def _a ( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str=5 , _lowerCAmelCase : str=None ) -> Tuple:
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
__lowercase = target_ids.shape[0]
__lowercase = model_outputs["""input_ids"""][0]
__lowercase = model_outputs["""logits"""]
if self.framework == "tf":
__lowercase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__lowercase = outputs.numpy()
__lowercase = outputs[0, masked_index, :]
__lowercase = stable_softmax(_lowerCAmelCase , axis=-1 )
if target_ids is not None:
__lowercase = tf.gather_nd(tf.squeeze(_lowerCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__lowercase = tf.expand_dims(_lowerCAmelCase , 0 )
__lowercase = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
__lowercase , __lowercase = topk.values.numpy(), topk.indices.numpy()
else:
__lowercase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__lowercase = outputs[0, masked_index, :]
__lowercase = logits.softmax(dim=-1 )
if target_ids is not None:
__lowercase = probs[..., target_ids]
__lowercase , __lowercase = probs.topk(_lowerCAmelCase )
__lowercase = []
__lowercase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__lowercase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__lowercase = input_ids.numpy().copy()
if target_ids is not None:
__lowercase = target_ids[p].tolist()
__lowercase = p
# Filter padding out:
__lowercase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__lowercase = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
__lowercase = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(_lowerCAmelCase )
result.append(_lowerCAmelCase )
if single_mask:
return result[0]
return result
def _a ( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = [targets]
try:
__lowercase = self.tokenizer.get_vocab()
except Exception:
__lowercase = {}
__lowercase = []
for target in targets:
__lowercase = vocab.get(_lowerCAmelCase , _lowerCAmelCase )
if id_ is None:
__lowercase = self.tokenizer(
_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , max_length=1 , truncation=_lowerCAmelCase , )["""input_ids"""]
if len(_lowerCAmelCase ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
__lowercase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__lowercase = list(set(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
__lowercase = np.array(_lowerCAmelCase )
return target_ids
def _a ( self : Any , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None ) -> Tuple:
"""simple docstring"""
__lowercase = {}
if targets is not None:
__lowercase = self.get_target_ids(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = target_ids
if top_k is not None:
__lowercase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) == 1:
return outputs[0]
return outputs
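# Hedged usage sketch of this pipeline (the model name is an assumption, not
# taken from this file):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)  # scored candidate fills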
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
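# The kernel value above is the standard Gabor function: a Gaussian envelope
# exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) modulated by a cosine carrier
# cos(2 * pi * x' / lambd + psi), where (x', y') is the pixel offset from the
# kernel center rotated by theta.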
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
from __future__ import annotations
import requests
__UpperCamelCase : Optional[Any] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def snake_case ( lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = "new" , lowerCamelCase = None ):
'''simple docstring'''
__lowercase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(lowerCamelCase ) - valid_terms ) ):
__lowercase = F'Invalid search term: {invalid_search_terms}'
raise ValueError(lowerCamelCase )
__lowercase = requests.get(
F'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 429:
raise requests.HTTPError
__lowercase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(lowerCamelCase )}
__lowercase = {}
for id_ in range(lowerCamelCase ):
__lowercase = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, it means you are rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = """laion/clap-htsat-unfused"""
__lowercase = tempfile.mkdtemp()
def _a ( self : Any , **_lowerCAmelCase : Any ) -> Any:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def _a ( self : Union[str, Any] , **_lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase = self.get_feature_extractor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
__lowercase = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__lowercase = processor(audios=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _a ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
__lowercase = """This is a test string"""
__lowercase = processor(text=_lowerCAmelCase )
__lowercase = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(_lowerCAmelCase )
__lowercase = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = ClapProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
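# `_LazyModule` defers the heavy imports declared in `_import_structure` until an
# attribute is first accessed, which keeps top-level `import transformers` cheap.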
import unittest
from knapsack import greedy_knapsack as kp
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [10, 20, 30, 40, 50, 60]
__lowercase = [2, 4, 6, 8, 10, 12]
__lowercase = 100
self.assertEqual(kp.calc_profit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , 210 )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , """max_weight must greater than zero.""" )
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , """Weight can not be negative.""" )
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , """Profit can not be negative.""" )
def _a ( self : Tuple ) -> int:
"""simple docstring"""
self.assertRaisesRegex(_lowerCAmelCase , """max_weight must greater than zero.""" )
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.assertRaisesRegex(
_lowerCAmelCase , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
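# A number is "bouncy" when its digits are neither monotonically increasing nor
# decreasing (Project Euler 112); e.g. 155349 is bouncy, while 134468 (increasing)
# and 66420 (decreasing) are not. Comparing the digit string against its sorted
# and reverse-sorted forms detects both monotone cases at once.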
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
import qiskit
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__lowercase = qiskit.QuantumCircuit(lowerCamelCase , lowerCamelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowercase = qiskit.execute(lowerCamelCase , lowerCamelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase )
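# With no gates applied before measurement the qubit stays in |0>, so 1000 shots
# on the simulator should return counts of {'0': 1000} for this circuit.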
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
from collections.abc import Callable
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 100 , ):
'''simple docstring'''
__lowercase = x_start
__lowercase = fnc(lowerCamelCase )
__lowercase = 0.0
for _ in range(lowerCamelCase ):
# Approximates the curve as a sequence of straight line segments and sums their lengths
__lowercase = (x_end - x_start) / steps + xa
__lowercase = fnc(lowerCamelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__lowercase = xa
__lowercase = fxa
return length
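# This is a polyline approximation of the arc length integral
# L = integral of sqrt(1 + f'(x)^2) dx: each step contributes hypot(dx, dy),
# and the sum converges to the true length as `steps` grows.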
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
__UpperCamelCase : Any = 10
while i <= 100000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
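# Note on the pattern used in both tests above: `replicate` copies the pipeline
# parameters to every available JAX device and `shard` splits the batched inputs
# across them, so the jitted pipeline call runs one sample per device in parallel.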
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__UpperCamelCase : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(
default=_lowerCAmelCase , metadata={'help': 'Model type selected in the list: ' + ', '.join(_lowerCAmelCase )} )
__snake_case :str = field(
default=_lowerCAmelCase , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :int = field(
default=1_2_8 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__snake_case :int = field(
default=6_4 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__snake_case :int = field(
default=3_0 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__snake_case :float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__snake_case :int = field(
default=2_0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__snake_case :int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__snake_case :int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'train'
__snake_case :Union[str, Any] = 'dev'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :SquadDataTrainingArguments
__snake_case :List[SquadFeatures]
__snake_case :Split
__snake_case :bool
def __init__( self : Union[str, Any] , _lowerCAmelCase : SquadDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[bool] = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = "pt" , ) -> int:
"""simple docstring"""
__lowercase = args
__lowercase = is_language_sensitive
__lowercase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowercase = mode
# Load data features from cache or dataset file
__lowercase = """v2""" if args.version_2_with_negative else """v1"""
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowercase = self.old_features["""features"""]
__lowercase = self.old_features.get("""dataset""" , _lowerCAmelCase )
__lowercase = self.old_features.get("""examples""" , _lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
__lowercase , __lowercase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowerCAmelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowerCAmelCase , )
__lowercase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowerCAmelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : str ) -> str:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Any , _lowerCAmelCase : Union[str, Any] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
__lowercase = self.features[i]
__lowercase = torch.tensor(feature.input_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.attention_mask , dtype=torch.long )
__lowercase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.cls_index , dtype=torch.long )
__lowercase = torch.tensor(feature.p_mask , dtype=torch.float )
__lowercase = torch.tensor(feature.is_impossible , dtype=torch.float )
__lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowercase = torch.tensor(feature.start_position , dtype=torch.long )
__lowercase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
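# Per the note further down in this file ("one consistent and two other
# inconsistent"), multi-heuristic A* uses the Euclidean distance above as the
# consistent anchor heuristic, while the time-scaled Euclidean variant and the
# Manhattan distance drive the additional, possibly inconsistent, search queues.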
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = np.max(_outputs , axis=-1 , keepdims=lowerCamelCase )
__lowercase = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCamelCase )
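# Numerically stable softmax: subtracting the row-wise max before exponentiating
# avoids overflow while leaving the normalized probabilities unchanged.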
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = 'sigmoid'
__snake_case :List[str] = 'softmax'
__snake_case :Any = 'none'
@add_end_docstrings(
_lowerCAmelCase , R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = False
__snake_case :int = ClassificationFunction.NONE
def __init__( self : Optional[Any] , **_lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]="" , **_lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = tokenizer_kwargs
__lowercase = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
__lowercase = self.model.config.return_all_scores
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) or top_k is None:
__lowercase = top_k
__lowercase = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , _lowerCAmelCase , )
if return_all_scores:
__lowercase = None
else:
__lowercase = 1
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__lowercase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : int , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = super().__call__(*_lowerCAmelCase , **_lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__lowercase = """top_k""" not in kwargs
if isinstance(args[0] , _lowerCAmelCase ) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def _a ( self : Union[str, Any] , _lowerCAmelCase : str , **_lowerCAmelCase : Tuple ) -> Dict[str, GenericTensor]:
"""simple docstring"""
__lowercase = self.framework
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return self.tokenizer(**_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) == 1 and isinstance(inputs[0] , _lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Tuple , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
return self.model(**_lowerCAmelCase )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ) -> Dict:
        """Turn the model's logits into {label, score} dicts."""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["""logits"""][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
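# A minimal usage sketch (assuming this file is wired into `transformers.pipeline` as the
# "text-classification" task; the checkpoint name below is only an example):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     print(classifier("This movie was great!"))               # [{'label': 'POSITIVE', 'score': ...}]
#     print(classifier("This movie was great!", top_k=None))   # scores for every label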
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config ( model_name ):
    '''Build a MaskFormerConfig (Swin-tiny backbone) whose labels match the given checkpoint name.'''
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def create_rename_keys ( config ):
    '''Map original MaskFormer/Swin parameter names to their Hugging Face equivalents.'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
    '''Move the value stored under `old` to key `new` in the given state dict.'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v ( state_dict , backbone_config ):
    '''Split the fused Swin qkv projections into separate query/key/value entries.'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v ( state_dict , config ):
    '''Split the fused decoder attention projections into separate q/k/v projections.'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img ( ):
    '''Fetch the standard COCO cats test image.'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''Copy/paste/tweak the original MaskFormer weights into our MaskFormer structure.'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
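# Example invocation (a sketch; the script filename and the local checkpoint path below are
# whatever your copy happens to use):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade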
| 53
| 1
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
        cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
    #                  stream_logs=True)
| 53
|
from math import sqrt
def is_prime ( number ):
    '''Check primality, using the fact that every prime above 3 has the form 6k +/- 1.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( nth = 10_001 ):
    '''Return the nth prime number (Project Euler problem 7).'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
    print(F'''{solution() = }''')
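# Quick sanity checks (a sketch, using the helpers above):
#     assert is_prime(13)
#     assert solution(6) == 13  # the 6th prime is 13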
| 53
| 1
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests ( unittest.TestCase ):
    def test_all_is_compatible ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible ( self ):
        """simple docstring"""
        filenames = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible ( self ):
        """simple docstring"""
        filenames = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowercase = """fp16"""
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_diffusers_model_is_compatible_variant ( self ):
        """simple docstring"""
        filenames = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowercase = """fp16"""
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_diffusers_model_is_compatible_variant_partial ( self ):
        """simple docstring"""
        filenames = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
__lowercase = """fp16"""
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_diffusers_model_is_not_compatible_variant ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowercase = """fp16"""
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_transformer_model_is_compatible_variant ( self ):
        """simple docstring"""
        filenames = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
__lowercase = """fp16"""
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_transformer_model_is_compatible_variant_partial ( self ):
        """simple docstring"""
        filenames = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
__lowercase = """fp16"""
self.assertTrue(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
    def test_transformer_model_is_not_compatible_variant ( self ):
        """simple docstring"""
        filenames = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowercase = """fp16"""
self.assertFalse(is_safetensors_compatible(_lowerCAmelCase , variant=_lowerCAmelCase ) )
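# A minimal sketch of the helper under test (the file listing below is hypothetical):
#
#     from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
#
#     filenames = ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#     assert is_safetensors_compatible(filenames)  # every .bin has a .safetensors counterpart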
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple ( x ):
    '''Return `x` unchanged if it is already iterable, otherwise duplicate it into a 2-tuple.'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin :
    def get_vision_text_model ( self , vision_config , text_config ):
        """simple docstring"""
        pass
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        pass
    def get_pretrained_model_and_inputs ( self ):
        """simple docstring"""
        pass
    def check_model_from_pretrained_configs ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
    def check_vision_text_output_attention ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals ( self , a : np.ndarray , b : np.ndarray , tol : float ):
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
    def test_vision_text_dual_encoder_model ( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict )
    def test_model_from_pretrained_configs ( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained ( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load ( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention ( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
    @slow
    def test_real_model_save_load_from_pretrained ( self ):
        """simple docstring"""
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs )
        out_1 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname )
            model_b = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_b(**inputs )
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class TFViTBertModelTest ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs ( self ):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model ( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs ( self ):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention ( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model ( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = TFDeiTModel(vision_config , name="""vision_model""" )
        text_model = TFRobertaModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs ( self ):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model ( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = TFCLIPVisionModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference ( self ):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3 ) )
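# A minimal sketch of building such a dual encoder outside the tests (the model names are the
# tiny test checkpoints used above):
#
#     model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
#         "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
#     )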
| 53
| 1
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file ( tmp_path ):
    '''simple docstring'''
    filename = tmp_path / """file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file ( tmp_path ):
    '''simple docstring'''
    filename = tmp_path / """malformed_file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image ( tmp_path , image_file ):
    '''simple docstring'''
    filename = tmp_path / """csv_with_image.csv"""
    data = textwrap.dedent(
        F'''\
        image
        {image_file}
        ''' )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label ( tmp_path ):
    '''simple docstring'''
    filename = tmp_path / """csv_with_label.csv"""
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list ( tmp_path ):
    '''simple docstring'''
    filename = tmp_path / """csv_with_int_list.csv"""
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename , """w""" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv ( csv_file , malformed_csv_file , caplog ):
    '''simple docstring'''
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="""Error tokenizing data""" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image ( csv_file_with_image ):
    '''simple docstring'''
    with open(csv_file_with_image , encoding="""utf-8""" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""image""" ).type == Image()()
    generated_content = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label ( csv_file_with_label ):
    '''simple docstring'''
    with open(csv_file_with_label , encoding="""utf-8""" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
    generated_content = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label ) for label in labels]
def test_csv_convert_int_list ( csv_file_with_int_list ):
    '''simple docstring'''
    csv = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
    generated_content = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
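# A minimal sketch of reading one of these fixtures through the packaged Csv module (the path
# below is hypothetical; the feature spec mirrors the label test above):
#
#     csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
#     pa_table = pa.concat_tables([t for _, t in csv._generate_tables([["/path/to/csv_with_label.csv"]])])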
| 53
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__( self , list_of_points : list[tuple[float, float]] ):
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function ( self , t : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function ( self , t : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve ( self , step_size : float = 0.01 ):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
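    # A quick numeric check (a sketch): at t=0.5 the degree-1 curve from (1, 2) to (3, 5)
    # passes through the average of its endpoints:
    #     assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)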
| 53
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool ( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function ( choices ):
    '''Build a parser for argparse that maps a string back to its original choice value.'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["""aliases"""] = aliases
    if help is not None:
        metadata["""help"""] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser ( ArgumentParser ):
    dataclass_types : Iterable[DataClassType]
    def __init__( self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["""formatter_class"""] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field ( parser : ArgumentParser , field : dataclasses.Field ):
        """simple docstring"""
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                """Unresolved type detected, which should have been done with the help of """
                """`typing.get_type_hints` method by default""" )
        aliases = kwargs.pop("""aliases""" , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , """__origin__""" , field.type )
        if origin_type is Union or (hasattr(types , """UnionType""" ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
                    """ the argument parser only supports one type per argument."""
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , """__origin__""" , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , """__origin__""" , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs["""choices"""] = field.type.__args__
            else:
                kwargs["""choices"""] = [x.value for x in field.type]
            kwargs["""type"""] = make_choice_type_function(kwargs["""choices"""] )
            if field.default is not dataclasses.MISSING:
                kwargs["""default"""] = field.default
            else:
                kwargs["""required"""] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["""type"""] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["""default"""] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["""nargs"""] = """?"""
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["""const"""] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs["""type"""] = field.type.__args__[0]
            kwargs["""nargs"""] = """+"""
            if field.default_factory is not dataclasses.MISSING:
                kwargs["""default"""] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["""required"""] = True
        else:
            kwargs["""type"""] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["""default"""] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["""default"""] = field.default_factory()
            else:
                kwargs["""required"""] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["""default"""] = False
            parser.add_argument(F'--no_{field.name}' , action="""store_false""" , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments ( self , dtype : DataClassType ):
        """simple docstring"""
        if hasattr(dtype , """_argument_group_name""" ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                """removing line of `from __future__ import annotations` which opts in Postponed """
                """Evaluation of Annotations (PEP 563)""" )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = """.""".join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    """line of `from __future__ import annotations` which opts in union types as """
                    """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
                    """support Python versions lower than 3.10, you need to use """
                    """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
                    """`X | None`.""" ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
def _a ( self : int , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : int=None , ) -> Tuple[DataClass, ...]:
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__lowercase = []
if args_filename:
args_files.append(Path(_lowerCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__lowercase = ArgumentParser()
args_file_parser.add_argument(_lowerCAmelCase , type=_lowerCAmelCase , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
__lowercase , __lowercase = args_file_parser.parse_known_args(args=_lowerCAmelCase )
__lowercase = vars(_lowerCAmelCase ).get(args_file_flag.lstrip("""-""" ) , _lowerCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] )
__lowercase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__lowercase = file_args + args if args is not None else file_args + sys.argv[1:]
__lowercase , __lowercase = self.parse_known_args(args=_lowerCAmelCase )
__lowercase = []
for dtype in self.dataclass_types:
__lowercase = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
__lowercase = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys}
for k in keys:
delattr(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if len(namespace.__dict__ ) > 0:
            # leftover command-line arguments end up in an additional namespace
outputs.append(_lowerCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def _a ( self : int , _lowerCAmelCase : Dict[str, Any] , _lowerCAmelCase : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
__lowercase = set(args.keys() )
__lowercase = []
for dtype in self.dataclass_types:
__lowercase = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
__lowercase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__lowercase = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}' )
return tuple(_lowerCAmelCase )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
with open(Path(_lowerCAmelCase ) , encoding="""utf-8""" ) as open_json_file:
__lowercase = json.loads(open_json_file.read() )
__lowercase = self.parse_dict(_lowerCAmelCase , allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> Tuple[DataClass, ...]:
"""simple docstring"""
__lowercase = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) , allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
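# A minimal usage sketch for the argument parser above (the dataclass and values are
# illustrative assumptions, not part of this module):
#
#   @dataclass
#   class TrainingConfig:
#       learning_rate: float = 1e-4
#       use_fp16: bool = False
#
#   parser = HfArgumentParser(TrainingConfig)
#   (cfg,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--use_fp16"])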
| 53
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
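# The slow integration tests above are opt-in; RUN_SLOW=1 is the transformers
# test-suite convention, and the file path below is an assumption about the repo layout:
#   RUN_SLOW=1 pytest tests/models/efficientformer/test_modeling_tf_efficientformer.py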
| 53
| 1
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case ( lowerCamelCase=None , lowerCamelCase=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowerCamelCase )
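# list_field is a small helper wrapping dataclasses.field with a default_factory,
# since mutable defaults are not allowed directly on dataclass fields.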
@dataclass
class __UpperCamelCase :
__snake_case :str = field(
metadata={'help': 'The csv file to plot.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__snake_case :Optional[List[str]] = list_field(
default=_lowerCAmelCase , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
try:
int(lowerCamelCase )
return True
except ValueError:
return False
def snake_case ( lowerCamelCase ):
'''simple docstring'''
try:
float(lowerCamelCase )
return True
except ValueError:
return False
class __UpperCamelCase :
def __init__( self : List[str] , _lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = args
__lowercase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
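        # Maps model name -> {"bsz": [...], "seq_len": [...], "result": {(batch_size, seq_len): value}}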
with open(self.args.csv_file , newline="""""" ) as csv_file:
__lowercase = csv.DictReader(_lowerCAmelCase )
for row in reader:
__lowercase = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
                    # value is not None and can be parsed as an int
__lowercase = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
                    # value is not None and can be parsed as a float
__lowercase = float(row["""result"""] )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = plt.subplots()
__lowercase = """Time usage""" if self.args.is_time else """Memory usage"""
__lowercase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
__lowercase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
__lowercase = self.result_dict[model_name]["""result"""]
((__lowercase) , (__lowercase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCAmelCase , )
else:
__lowercase = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((__lowercase) , (__lowercase)) = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
__lowercase = np.asarray(_lowerCAmelCase , _lowerCAmelCase )[: len(_lowerCAmelCase )]
plt.scatter(
_lowerCAmelCase , _lowerCAmelCase , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(_lowerCAmelCase , _lowerCAmelCase , """--""" )
title_str += F' {label_model_name} vs.'
__lowercase = title_str[:-4]
__lowercase = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(_lowerCAmelCase )
plt.xlabel(_lowerCAmelCase )
plt.ylabel(_lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser(lowerCamelCase )
__lowercase = parser.parse_args_into_dataclasses()[0]
__lowercase = Plot(args=lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
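# Example invocation (a sketch; the script name and csv path are hypothetical):
#   python plot_csv_file.py --csv_file inference_results.csv --figure_png_file plot.png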
| 53
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
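    # fairseq marks BPE continuation with a trailing "@@", while the HF BPE vocab marks
    # word ends with "</w>". Illustratively (values are made up):
    #   {"hello@@": 5, "world": 6} -> {"hello": 5, "world</w>": 6}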
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
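# Example invocation (a sketch; paths are hypothetical):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir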
| 53
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=8 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=36 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=512 , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : str=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Optional[int]=None , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Any ) -> int:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = 300
return config
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
        ((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a ( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = MraModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , ) -> str:
"""simple docstring"""
__lowercase = True
__lowercase = MraModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = MraForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = MraForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MraForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MraForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = MraForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        ((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Dict = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case :List[Any] = False
__snake_case :List[str] = False
__snake_case :Dict = False
__snake_case :Tuple = False
__snake_case :Optional[Any] = ()
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = MraModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Any ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MraModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""MRA does not output attentions""" )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__lowercase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__lowercase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = 5_0265
__lowercase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__lowercase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = 5_0265
__lowercase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
        __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
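# A minimal standalone sketch of what the tests above exercise (hedged: this
# assumes `diffusers` with the onnxruntime extras installed; the model id and
# revision mirror the tests, while the provider string is illustrative):
#
#     pipe = OnnxStableDiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", revision="onnx",
#         provider="CUDAExecutionProvider",
#     )
#     image = pipe("open neural network exchange", num_inference_steps=10).images[0]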
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Dict = ProphetNetTokenizer
__snake_case :List[str] = False
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
super().setUp()
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = """UNwant\u00E9d,running"""
__lowercase = """unwanted, running"""
return input_text, output_text
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowercase = {}
for i, token in enumerate(_lowerCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowercase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowercase = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
__lowercase = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""pt""" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[int] = None
__snake_case :Union[str, Any] = BloomTokenizerFast
__snake_case :List[Any] = BloomTokenizerFast
__snake_case :Union[str, Any] = True
__snake_case :List[Any] = False
__snake_case :Dict = 'tokenizer_file'
__snake_case :int = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().setUp()
__lowercase = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Dict , **_lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__lowercase = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
__lowercase = tokenizer.batch_encode_plus(_lowerCAmelCase )["""input_ids"""]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict=6 ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowercase = """This is a simple input"""
__lowercase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowercase = ("""This is a simple input""", """This is a pair""")
__lowercase = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__lowercase = None # Hotfixing padding = None
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
def _a ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=_lowerCAmelCase )
__lowercase = next(iter(_lowerCAmelCase ) )["""premise"""] # pick up one data
__lowercase = list(sample_data.values() )
__lowercase = list(map(tokenizer.encode , _lowerCAmelCase ) )
__lowercase = [tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) for x in output_tokens]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = len(lowerCamelCase )
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if numbers[j] < numbers[i]:
__lowercase , __lowercase = numbers[j], numbers[i]
return numbers
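# Trace of the O(n^2) exchange sort above on a small input (comment-only):
#
#     [3, 1, 2]  i=0: swap 3 and 1 -> [1, 3, 2]
#                i=1: swap 3 and 2 -> [1, 2, 3]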
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : int = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : int = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["""CLIPFeatureExtractor"""]
__UpperCamelCase : Optional[int] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :"DiagonalGaussianDistribution"
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :Dict = True
@register_to_config
def __init__( self : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , _lowerCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , _lowerCAmelCase : Tuple[int] = (64,) , _lowerCAmelCase : int = 1 , _lowerCAmelCase : str = "silu" , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 32 , _lowerCAmelCase : int = 32 , _lowerCAmelCase : float = 0.18_215 , ) -> Dict:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
__lowercase = Encoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , down_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , double_z=_lowerCAmelCase , )
# pass init params to Decoder
__lowercase = Decoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , up_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , act_fn=_lowerCAmelCase , )
__lowercase = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
__lowercase = nn.Conv2d(_lowerCAmelCase , _lowerCAmelCase , 1 )
__lowercase = False
__lowercase = False
# only relevant if vae tiling is enabled
__lowercase = self.config.sample_size
__lowercase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowercase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowercase = 0.25
def _a ( self : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=False ) -> List[str]:
"""simple docstring"""
if isinstance(_lowerCAmelCase , (Encoder, Decoder) ):
__lowercase = value
def _a ( self : Union[str, Any] , _lowerCAmelCase : bool = True ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = use_tiling
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
self.enable_tiling(_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = True
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a ( self : Dict ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
__lowercase = {}
def fn_recursive_add_processors(_lowerCAmelCase : str , _lowerCAmelCase : torch.nn.Module , _lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(_lowerCAmelCase , """set_processor""" ):
__lowercase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return processors
def _a ( self : Tuple , _lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Any:
"""simple docstring"""
__lowercase = len(self.attn_processors.keys() )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(_lowerCAmelCase )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_lowerCAmelCase : str , _lowerCAmelCase : torch.nn.Module , _lowerCAmelCase : str ):
if hasattr(_lowerCAmelCase , """set_processor""" ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
module.set_processor(_lowerCAmelCase )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , _lowerCAmelCase , _lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def _a ( self : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
if self.use_slicing and x.shape[0] > 1:
__lowercase = [self.encoder(_lowerCAmelCase ) for x_slice in x.split(1 )]
__lowercase = torch.cat(_lowerCAmelCase )
else:
__lowercase = self.encoder(_lowerCAmelCase )
__lowercase = self.quant_conv(_lowerCAmelCase )
__lowercase = DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_lowerCAmelCase , return_dict=_lowerCAmelCase )
__lowercase = self.post_quant_conv(_lowerCAmelCase )
__lowercase = self.decoder(_lowerCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
@apply_forward_hook
def _a ( self : Optional[int] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
__lowercase = [self._decode(_lowerCAmelCase ).sample for z_slice in z.split(1 )]
__lowercase = torch.cat(_lowerCAmelCase )
else:
__lowercase = self._decode(_lowerCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_lowerCAmelCase )
def _a ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = min(a.shape[2] , b.shape[2] , _lowerCAmelCase )
for y in range(_lowerCAmelCase ):
__lowercase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _a ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = min(a.shape[3] , b.shape[3] , _lowerCAmelCase )
for x in range(_lowerCAmelCase ):
__lowercase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
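# Worked example of the two blend helpers above (comment-only): with
# blend_extent = 4, the overlapping rows/columns are cross-faded with weights
# 0/4, 1/4, 2/4, 3/4, so tile `b` ramps in linearly while tile `a` ramps out,
# hiding the seams between adjacent tiles.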
def _a ( self : Optional[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
__lowercase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowercase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowercase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowercase = []
for i in range(0 , x.shape[2] , _lowerCAmelCase ):
__lowercase = []
for j in range(0 , x.shape[3] , _lowerCAmelCase ):
__lowercase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowercase = self.encoder(_lowerCAmelCase )
__lowercase = self.quant_conv(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
__lowercase = []
for i, row in enumerate(_lowerCAmelCase ):
__lowercase = []
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowercase = self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
__lowercase = self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
__lowercase = torch.cat(_lowerCAmelCase , dim=2 )
__lowercase = DiagonalGaussianDistribution(_lowerCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__lowercase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowercase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowercase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowercase = []
for i in range(0 , z.shape[2] , _lowerCAmelCase ):
__lowercase = []
for j in range(0 , z.shape[3] , _lowerCAmelCase ):
__lowercase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowercase = self.post_quant_conv(_lowerCAmelCase )
__lowercase = self.decoder(_lowerCAmelCase )
row.append(_lowerCAmelCase )
rows.append(_lowerCAmelCase )
__lowercase = []
for i, row in enumerate(_lowerCAmelCase ):
__lowercase = []
for j, tile in enumerate(_lowerCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowercase = self.blend_v(rows[i - 1][j] , _lowerCAmelCase , _lowerCAmelCase )
if j > 0:
__lowercase = self.blend_h(row[j - 1] , _lowerCAmelCase , _lowerCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_lowerCAmelCase , dim=3 ) )
__lowercase = torch.cat(_lowerCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def _a ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__lowercase = sample
__lowercase = self.encode(_lowerCAmelCase ).latent_dist
if sample_posterior:
__lowercase = posterior.sample(generator=_lowerCAmelCase )
else:
__lowercase = posterior.mode()
__lowercase = self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
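# Usage sketch for the tiling/slicing switches defined above (hedged: this
# assumes the class is diffusers' standard AutoencoderKL and that its public
# method names follow that API; the checkpoint id is illustrative only):
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()   # encode/decode very large images tile by tile
#     vae.enable_slicing()  # process batched latents one sample at a time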
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often, splitting large files into smaller ones can prevent the tokenizer from running out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word masking.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization. '
'The training dataset will be truncated in blocks of this size for training. '
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
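# A typical invocation of this script (hedged sketch: the flags come from the
# dataclasses above plus transformers' TrainingArguments; the script name and
# paths are placeholders):
#
#     python run_language_modeling.py \
#         --model_name_or_path roberta-base --mlm \
#         --train_data_file train.txt --do_train \
#         --output_dir out/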
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
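# The wrapper below matches the `_mp_fn(index)` entry point that torch_xla's
# xla_spawn launcher expects: the index argument is required by the spawn API
# but is intentionally unused.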
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = 'audio-spectrogram-transformer'
def __init__( self : Any , _lowerCAmelCase : Optional[Any]=768 , _lowerCAmelCase : Tuple=12 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=3072 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1e-12 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : Optional[Any]=1024 , _lowerCAmelCase : Optional[int]=128 , **_lowerCAmelCase : Optional[Any] , ) -> str:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = patch_size
__lowercase = qkv_bias
__lowercase = frequency_stride
__lowercase = time_stride
__lowercase = max_length
__lowercase = num_mel_bins
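# Patch-grid arithmetic implied by the defaults above (comment-only sketch,
# following the AST implementation this config mirrors):
#
#     frequency_dim = (num_mel_bins - patch_size) // frequency_stride + 1
#                   = (128 - 16) // 10 + 1 = 12
#     time_dim      = (max_length - patch_size) // time_stride + 1
#                   = (1024 - 16) // 10 + 1 = 101
#     num_patches   = 12 * 101 = 1212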
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase : Any = logging.get_logger(__name__)
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :int = ['pixel_values']
def __init__( self : List[str] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 255 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_lowerCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = size if size is not None else {"""shortest_edge""": 224}
__lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
__lowercase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__lowercase = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self : Any , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowercase = int((256 / 224) * size["""shortest_edge"""] )
__lowercase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
__lowercase = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
_lowerCAmelCase , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Any , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Tuple , ) -> np.ndarray:
"""simple docstring"""
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Dict[str, int]] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Dict[str, int]] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[float] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , _lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , _lowerCAmelCase : Optional[TensorType] = None , _lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase : List[Any] , ) -> BatchFeature:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
__lowercase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_center_crop:
__lowercase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
__lowercase = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
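# Hedged usage sketch (assumption: this processor follows the standard
# shortest-edge ImageNet recipe). With the defaults, the shortest edge is
# resized to int(256 / 224 * 224) = 256, the result is center-cropped to
# 224x224, rescaled by 1 / 255 and normalized with IMAGENET_DEFAULT_MEAN/STD:
#   processor = __UpperCamelCase()
#   batch = processor(images=pil_image, return_tensors="pt")  # `pil_image` is hypothetical
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)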
| 53
|
from __future__ import annotations
def snake_case ( nums ):
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
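    # Hedged worked example (assumption: snake_case returns the maximum sum of
    # non-adjacent elements): from [2, 7, 9, 3, 1] the best pick is 2 + 9 + 1.
    print(snake_case([2, 7, 9, 3, 1]))  # expected: 12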
| 53
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__( self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        ((config) , (pixel_values) , (labels)) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__snake_case :List[Any] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__snake_case :Tuple = False
__snake_case :List[Any] = False
__snake_case :List[str] = False
__snake_case :Tuple = False
__snake_case :int = False
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
pass
def _a ( self : int ) -> Tuple:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
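            # Hedged worked numbers for the default tester (image_size=224,
            # embed_dims=[48, 56, 112, 220]): 224 // 4 = 56, halved after every
            # 2 blocks, gives spatial sizes 56, 56, 28, 28, 14, 14, 7, 7 and
            # channel counts 48, 48, 56, 56, 112, 112, 220, 220 for the 8
            # hidden states asserted below.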
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : int ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        model = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="""pytorch_model.bin""" ) )
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("""roberta.""" ):
            tensor_key = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
            continue
        new_state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=new_state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
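# Hedged CLI sketch (the script name and output path are illustrative):
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./converted_model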
| 53
| 1
|
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    '''simple docstring'''
    _id = F'{file}_{class_name}_{test_name}'
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = F'class {class_name}('
    test_regex = F'{4 * " "}def {test_name}('
    line_begin_regex = F'{8 * " "}{correct_line.split()[0]}'
    another_line_begin_regex = F'{16 * " "}{correct_line.split()[0]}'
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F'{spaces * " "}{correct_line}' )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main( correct , fail=None ):
    '''simple docstring'''
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
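# Hedged input sketch: each line of --correct_filename is expected to carry four
# semicolon-separated fields, `file;class_name;test_name;correct_line`, e.g.
#   tests/test_foo.py;FooPipelineTests;test_inference;expected_slice = np.array([0.1, 0.2])
# (the path, class and slice values here are purely illustrative).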
| 53
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
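# The kernel above implements the standard real-valued Gabor function
#   g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel coordinates rotated by theta; e.g. the center
# pixel (x' = y' = 0) always evaluates to cos(psi).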
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image into grayscale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 53
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text( text , n=100 , character=" " ):
    '''simple docstring'''
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents( documents ):
    '''simple docstring'''
    titles, texts = [], []
    for title, text in zip(documents["""title"""] , documents["""text"""] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else """""" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents , ctx_encoder , ctx_tokenizer ):
    '''simple docstring'''
    input_ids = ctx_tokenizer(
        documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ):
    '''simple docstring'''
    logger.info("""Step 1 - Create the dataset""" )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("""Step 2 - Index the dataset""" )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("""embeddings""" , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
    dataset.get_index("""embeddings""" ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
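# Hedged sketch of the expected input file (tab-separated "title<TAB>text" rows;
# the content below is purely illustrative):
#   Aaron<TAB>Aaron is a prophet, high priest and the elder brother of Moses ...
#   Moses<TAB>Moses is considered the most important prophet in Judaism ...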
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    '''simple docstring'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    '''simple docstring'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
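# Hedged sketch of a warning line the filter above would keep (the path and
# message are illustrative): "src/transformers/foo.py:123: FutureWarning: `bar` is deprecated".
# The `F': {x}: '` membership test keys on the ": FutureWarning: " fragment.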
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 1
|
def remove_duplicates( key ):
    '''simple docstring'''
    key_no_dups = """"""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key ):
    '''simple docstring'''
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message , cipher_map ):
    '''simple docstring'''
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message , cipher_map ):
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main():
    '''simple docstring'''
    message = input("""Enter message to encode or decode: """ ).strip()
    key = input("""Enter keyword: """ ).strip()
    option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
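# Hedged worked example: with key "MARVEL" the map starts A->M, B->A, C->R,
# D->V, E->E, F->L; the remaining plaintext letters are then filled from the
# start of the alphabet, skipping any letter the key already used (G->B, H->C, ...).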
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mra"""] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
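# The _LazyModule indirection above keeps importing this package cheap: the
# torch-backed Mra classes listed in _import_structure are only materialized
# when one of those names is first accessed.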
| 53
| 1
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
        __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
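    # Hedged note on the two embedding tests above: precomputing `prompt_embeds`
    # (and the negative-prompt embeddings) with the pipeline's own tokenizer and
    # text encoder is expected to reproduce the plain string-prompt output
    # within 1e-4, which is what the image-slice comparisons assert.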
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
|
def check_bouncy( num ):
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(num )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent = 99 ):
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
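# Hedged worked example: 538 is bouncy because its digits are neither
# increasing nor decreasing (sorted -> "358", reverse-sorted -> "853"),
# while 123 (increasing) and 951 (decreasing) are not bouncy.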
| 53
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
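# Hedged sanity note for betas_for_alpha_bar: each beta_i equals
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so with the
# cosine transform the betas start near 0 and grow toward max_beta as t -> 1.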
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :Tuple = [e.name for e in KarrasDiffusionSchedulers]
__snake_case :Optional[int] = 2
@register_to_config
def __init__( self : List[str] , _lowerCAmelCase : int = 1000 , _lowerCAmelCase : float = 0.00_085 , _lowerCAmelCase : float = 0.012 , _lowerCAmelCase : str = "linear" , _lowerCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _lowerCAmelCase : str = "epsilon" , _lowerCAmelCase : Optional[bool] = False , _lowerCAmelCase : Optional[bool] = False , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : str = "linspace" , _lowerCAmelCase : int = 0 , ) -> List[Any]:
"""simple docstring"""
if trained_betas is not None:
            __lowercase = torch.tensor(_lowerCAmelCase , dtype=torch.float32 )
        elif beta_schedule == "linear":
            __lowercase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __lowercase = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.float32 ) ** 2
            )
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
__lowercase = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""exp""" )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
__lowercase = 1.0 - self.betas
__lowercase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = use_karras_sigmas
def _a ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=None ) -> Optional[int]:
"""simple docstring"""
if schedule_timesteps is None:
__lowercase = self.timesteps
__lowercase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowercase = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
__lowercase = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
__lowercase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = self.index_for_timestep(_lowerCAmelCase )
__lowercase = self.sigmas[step_index]
__lowercase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, torch.device] = None , _lowerCAmelCase : Optional[int] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = num_inference_steps
__lowercase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowercase = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowercase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowercase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowercase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowercase = np.log(_lowerCAmelCase )
__lowercase = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase )
if self.config.use_karras_sigmas:
__lowercase = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps )
__lowercase = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] )
        __lowercase = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
__lowercase = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
__lowercase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowercase = torch.from_numpy(_lowerCAmelCase )
__lowercase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith("""mps""" ):
# mps does not support float64
            __lowercase = timesteps.to(_lowerCAmelCase , dtype=torch.float32 )
else:
__lowercase = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
__lowercase = None
__lowercase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowercase = defaultdict(_lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = np.log(_lowerCAmelCase )
# get distribution
__lowercase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowercase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowercase = low_idx + 1
__lowercase = log_sigmas[low_idx]
__lowercase = log_sigmas[high_idx]
# interpolate sigmas
__lowercase = (low - log_sigma) / (low - high)
__lowercase = np.clip(_lowerCAmelCase , 0 , 1 )
# transform interpolation to time range
__lowercase = (1 - w) * low_idx + w * high_idx
__lowercase = t.reshape(sigma.shape )
return t
def _a ( self : Tuple , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = in_sigmas[-1].item()
__lowercase = in_sigmas[0].item()
__lowercase = 7.0 # 7.0 is the value used in the paper
__lowercase = np.linspace(0 , 1 , _lowerCAmelCase )
__lowercase = sigma_min ** (1 / rho)
__lowercase = sigma_max ** (1 / rho)
__lowercase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _a ( self : Any ) -> Optional[int]:
"""simple docstring"""
return self.dt is None
def _a ( self : Union[str, Any] , _lowerCAmelCase : Union[torch.FloatTensor, np.ndarray] , _lowerCAmelCase : Union[float, torch.FloatTensor] , _lowerCAmelCase : Union[torch.FloatTensor, np.ndarray] , _lowerCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
__lowercase = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
__lowercase = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowercase = self.sigmas[step_index]
__lowercase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowercase = self.sigmas[step_index - 1]
__lowercase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowercase = 0
__lowercase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowercase = sigma_hat if self.state_in_first_order else sigma_next
__lowercase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = sigma_hat if self.state_in_first_order else sigma_next
__lowercase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowercase = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
__lowercase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowercase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowercase = sigma_next - sigma_hat
# store for 2nd order step
__lowercase = derivative
__lowercase = dt
__lowercase = sample
else:
# 2. 2nd order / Heun's method
__lowercase = (sample - pred_original_sample) / sigma_next
__lowercase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowercase = self.dt
__lowercase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowercase = None
__lowercase = None
__lowercase = None
__lowercase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def _a ( self : Tuple , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
__lowercase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
__lowercase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowercase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowercase = self.timesteps.to(original_samples.device )
__lowercase = timesteps.to(original_samples.device )
__lowercase = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps]
__lowercase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowercase = sigma.unsqueeze(-1 )
__lowercase = original_samples + noise * sigma
return noisy_samples
def __len__( self : Tuple ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
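# --- Editor's sketch (illustrative; not part of the original scheduler, names are
# hypothetical). A minimal, standalone NumPy version of the Karras et al. (2022)
# sigma schedule that `_convert_to_karras` above computes: interpolate between
# sigma_max and sigma_min in rho-warped space, with rho = 7.0 as in the paper.
import numpy as np

def karras_sigmas_sketch(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # descends from sigma_max (ramp = 0) to sigma_min (ramp = 1), matching the method above
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# e.g. karras_sigmas_sketch(0.03, 80.0, 10) -> 10 noise levels from 80.0 down to 0.03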
| 53
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
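# Editor's note (illustrative): `_LazyModule` defers the torch-dependent imports above
# until an attribute such as `SwiftFormerModel` is first accessed, so importing the
# package stays cheap even when torch is not installed.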
| 53
| 1
|
from math import ceil
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = list(range(0 , lowerCamelCase ) )
__lowercase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
__lowercase = []
for i in device_map_blocks:
if device_map_blocks.count(lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowerCamelCase )
# Missing blocks
__lowercase = [i for i in blocks if i not in device_map_blocks]
__lowercase = [i for i in device_map_blocks if i not in blocks]
if len(lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(lowerCamelCase ) )
if len(lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(lowerCamelCase ) )
if len(lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(lowerCamelCase ) )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = list(range(lowerCamelCase ) )
__lowercase = int(ceil(n_layers / len(lowerCamelCase ) ) )
__lowercase = [layers[i : i + n_blocks] for i in range(0 , lowerCamelCase , lowerCamelCase )]
return dict(zip(lowerCamelCase , lowerCamelCase ) )
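# --- Editor's sketch (illustrative; `even_device_map` is a hypothetical name). The
# second helper above partitions n_layers into ceil(n_layers / n_devices) consecutive
# blocks, one block per device. A minimal, self-contained version:
from math import ceil  # already imported at the top of this file

def even_device_map(devices, n_layers):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, blocks))

# even_device_map([0, 1, 2, 3], 12) -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}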
| 53
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 53
| 1
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
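# Editor's note (illustrative): e.g. model_name "maskformer-swin-tiny-ade" hits the
# "ade" branch above, so the config ends up with num_labels = 150 taken from
# "ade20k-id2label.json".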
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
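# --- Editor's sketch (illustrative; toy shapes, hypothetical `_demo_*` names, reuses
# the `import torch` at the top of this file). The slicing above splits a fused qkv
# projection of shape (3*dim, dim) into query, key and value weights of shape
# (dim, dim) each, stacked in that order:
_demo_dim = 4
_demo_fused = torch.arange(3 * _demo_dim * _demo_dim, dtype=torch.float32).reshape(3 * _demo_dim, _demo_dim)
_demo_q = _demo_fused[:_demo_dim, :]
_demo_k = _demo_fused[_demo_dim : _demo_dim * 2, :]
_demo_v = _demo_fused[-_demo_dim:, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_dim, _demo_dim)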
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = True if """ade""" in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
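# Editor's usage note (illustrative; the script filename and paths are hypothetical):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-converted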
| 53
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
__lowercase , __lowercase = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
__lowercase , __lowercase = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
__lowercase , __lowercase = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
__lowercase , __lowercase = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
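# Editor's note (illustrative): the `put` update branch above is meant to replace an
# existing item's priority rather than insert a duplicate -- entries are popped into a
# temporary list until the item is found, the item is re-appended with its new
# priority, and everything is pushed back onto the heap.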
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
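# Editor's worked example (illustrative values): the key above is the weighted A*
# evaluation f(s) = g(s) + W1 * h_i(s). With g[start] = 0, W1 = 1 and the Manhattan
# heuristic toward goal (19, 19):
#   f((0, 0)) = 0 + 1 * (|0 - 19| + |0 - 19|) = 38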
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
__lowercase , __lowercase = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
__lowercase , __lowercase = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent heuristic and two inconsistent ones
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
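# Editor's note (illustrative): this appears to implement Multi-Heuristic A* -- one
# "anchor" queue driven by the consistent heuristic plus n_heuristic - 1 queues driven
# by inadmissible heuristics; an inadmissible expansion is taken only while its key
# stays within W2 times the anchor queue's minimum key.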
| 53
| 1
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __UpperCamelCase :
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=13 , _lowerCAmelCase : List[Any]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : int=37 , _lowerCAmelCase : Tuple="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Any=None , ) -> List[str]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _a ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = LlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = True
__lowercase = LlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , ) -> Optional[int]:
"""simple docstring"""
__lowercase = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
__lowercase = True
__lowercase = True
__lowercase = LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
__lowercase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["""hidden_states"""][0]
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__snake_case :List[Any] = (LlamaForCausalLM,) if is_torch_available() else ()
__snake_case :Tuple = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case :Union[str, Any] = False
__snake_case :int = False
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = LlamaModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Dict ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(_lowerCAmelCase )
__lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : int ) -> Any:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = """single_label_classification"""
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(_lowerCAmelCase )
__lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = """multi_label_classification"""
__lowercase = input_dict["""input_ids"""]
__lowercase = input_ids.ne(1 ).to(_lowerCAmelCase )
__lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase = LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Tuple ) -> str:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ids_tensor([1, 10] , config.vocab_size )
__lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = LlamaModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
__lowercase = original_model(_lowerCAmelCase ).last_hidden_state
__lowercase = original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = {"""type""": scaling_type, """factor""": 10.0}
__lowercase = LlamaModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
__lowercase = scaled_model(_lowerCAmelCase ).last_hidden_state
__lowercase = scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
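# Editor's note (illustrative): linear RoPE scaling rescales positions by `factor` at
# every length, so even short-input outputs change, while dynamic NTK scaling only
# activates past the original maximum length -- hence the short-input case is asserted
# equal for "dynamic" but different for "linear" above.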
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
__lowercase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
__lowercase = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : int ) -> Any:
"""simple docstring"""
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
__lowercase = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
__lowercase = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__lowercase = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__lowercase = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
__lowercase = model(torch.tensor(_lowerCAmelCase ) )
__lowercase = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
__lowercase = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__lowercase = """Simply put, the theory of relativity states that """
__lowercase = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
__lowercase = tokenizer.encode(_lowerCAmelCase , return_tensors="""pt""" )
__lowercase = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_lowerCAmelCase )
# greedy generation outputs
__lowercase = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
__lowercase = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
            __lowercase = in_proj_weight[:dim, :]
            __lowercase = in_proj_bias[:dim]
            __lowercase = in_proj_weight[dim : dim * 2, :]
            __lowercase = in_proj_bias[dim : dim * 2]
            __lowercase = in_proj_weight[-dim:, :]
            __lowercase = in_proj_bias[-dim:]
# fmt: on
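# A minimal, hypothetical sketch of the slicing performed above (not called by this
# script): the checkpoint stores query/key/value fused as a single (3 * dim, dim)
# weight plus a (3 * dim,) bias, and the three (dim, dim) blocks are carved out in
# query, key, value order. The helper name below is illustrative only.
def _split_fused_qkv_example(in_proj_weight, in_proj_bias, dim):
    # query is the first dim rows, key the second dim rows, value the last dim rows
    query = (in_proj_weight[:dim, :], in_proj_bias[:dim])
    key = (in_proj_weight[dim : dim * 2, :], in_proj_bias[dim : dim * 2])
    value = (in_proj_weight[-dim:, :], in_proj_bias[-dim:])
    return query, key, value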
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
    # fmt: off
    __lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
        __lowercase = in_proj_weight[:hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
        __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
        __lowercase = in_proj_bias[hidden_size : hidden_size * 2]
        __lowercase = in_proj_weight[-hidden_size:, :]
        __lowercase = in_proj_bias[-hidden_size:]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
        __lowercase = in_proj_weight[:hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
        __lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
        __lowercase = in_proj_bias[hidden_size : hidden_size * 2]
        __lowercase = in_proj_weight[-hidden_size:, :]
        __lowercase = in_proj_bias[-hidden_size:]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = True if """ade""" in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53
| 1
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__UpperCamelCase : Any = input("""Enter image url: """).strip()
print(F'''Downloading image from {url} ...''')
__UpperCamelCase : List[str] = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
__UpperCamelCase : Union[str, Any] = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
__UpperCamelCase : int = requests.get(image_url).content
__UpperCamelCase : Dict = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 53
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
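# Worked example of the 6k +/- 1 fact used above: writing n = 6k + r with r in
# {0, ..., 5}, the residues 0, 2, 3 and 4 are divisible by 2 or 3, so any prime
# greater than 3 has r = 1 or r = 5 (e.g. 5 = 6*1 - 1, 7 = 6*1 + 1). The converse
# fails (25 = 6*4 + 1 is composite), which is why the trial divisions above remain.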
def snake_case ( lowerCamelCase = 10_001 ):
'''simple docstring'''
__lowercase = 0
__lowercase = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase ):
count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
| 1
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
__lowercase = True
    # 0 and 1 are not primes.
if number <= 1:
__lowercase = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__lowercase = False
break
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'status' must been from type bool"
return status
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
__lowercase = list(range(2 , n + 1 ) )
    __lowercase = [] # this list will be returned.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase ) ):
for j in range(i + 1 , len(lowerCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowercase = 0
# filters actual prime numbers.
__lowercase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
__lowercase = []
    # iterates over all numbers from 2 up to N (inclusive)
    # and appends a number to the list 'ans' if it is prime
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase ):
ans.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and number >= 0, "'number' must been an int and >= 0"
    __lowercase = [] # this list will be returned by the function.
# potential prime number factors.
__lowercase = 2
__lowercase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(lowerCamelCase ):
while quotient != 1:
if is_prime(lowerCamelCase ) and (quotient % factor == 0):
ans.append(lowerCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
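# Illustrative: 40 = 2**3 * 5, so the factorization gathered by repeated trial
# division here is intended to be [2, 2, 2, 5].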
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = max(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = min(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase ), "compare must been from type bool"
return number % 2 == 0
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase ), "compare must been from type bool"
return number % 2 != 0
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase ) and (number > 2) and is_even(lowerCamelCase )
), "'number' must been an int, even and > 2"
    __lowercase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
__lowercase = get_prime_numbers(lowerCamelCase )
__lowercase = len(lowerCamelCase )
# run variable for while-loops.
__lowercase = 0
__lowercase = None
    # exit variable, used to break out of the loops
__lowercase = True
while i < len_pn and loop:
__lowercase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowercase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (len(lowerCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
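# Illustrative: for the even number 28 the intended partition is [5, 23], the
# first prime pair found when scanning the primes below 28 in increasing order.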
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowercase = 0
while numbera != 0:
__lowercase = numbera % numbera
__lowercase = numbera
__lowercase = rest
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowercase = 1 # actual answer that will be return.
    # for lcm(x, 1) ("kgV" is the German abbreviation for lcm)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = prime_factorization(lowerCamelCase )
elif numbera == 1 or numbera == 1:
__lowercase = []
__lowercase = []
__lowercase = max(lowerCamelCase , lowerCamelCase )
__lowercase = 0
__lowercase = 0
    __lowercase = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowercase = prime_fac_a.count(lowerCamelCase )
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(max(lowerCamelCase , lowerCamelCase ) ):
ans *= n
else:
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
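# A minimal cross-check sketch (hypothetical, not part of the original module): the
# prime-factorization construction above should agree with the classic identity
# lcm(a, b) = a * b // gcd(a, b). For 12 = 2**2 * 3 and 18 = 2 * 3**2 the maximum
# exponents give 2**2 * 3**2 = 36, and 12 * 18 // 6 == 36 as well.
def _lcm_via_gcd_example(a, b):
    from math import gcd  # self-contained import; avoids the shadowed names above
    return a * b // gcd(a, b)  # e.g. _lcm_via_gcd_example(12, 18) == 36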
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'number' must been a positive int"
__lowercase = 0
__lowercase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime then
        # run on to the next prime number.
while not is_prime(lowerCamelCase ):
ans += 1
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and is_prime(
lowerCamelCase ), "'ans' must been a prime number and from type int"
return ans
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase ) and is_prime(lowerCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowercase = p_number_a + 1 # jump to the next number
    __lowercase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and ans[0] != p_number_a
and ans[len(lowerCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1"
__lowercase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase )
# precondition
    assert ans[0] == 1 and ans[len(lowerCamelCase ) - 1] == n, "Error in function get_divisors(...)"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
__lowercase = get_divisors(lowerCamelCase )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
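# Illustrative: 6 (1 + 2 + 3) and 28 (1 + 2 + 4 + 7 + 14) are perfect numbers,
# while 12 is not, since 1 + 2 + 3 + 4 + 6 = 16 != 12.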
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowercase = gcd(abs(lowerCamelCase ) , abs(lowerCamelCase ) )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
    __lowercase = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
__lowercase = 0
__lowercase = 1
    __lowercase = 1 # this will be returned
for _ in range(n - 1 ):
__lowercase = ans
ans += fiba
__lowercase = tmp
return ans
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
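    # Worked example of the shape check above (hypothetical sizes): image_size 224
    # and patch_size 16 give (224 // 16) * (224 // 16) = 196 patches, so the ViT
    # sequence length is 196 + 1 = 197 once the [CLS] token is added.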
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
        self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between outputs is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
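    # Worked example (hypothetical sizes): with image_size 224 and patch_size 16,
    # DeiT sees 196 patches plus the [CLS] and distillation tokens, so seq_len = 198.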
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(_lowerCAmelCase ) - 1
def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_lowerCAmelCase ) , 5 ) == 1
return output_values
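    # Worked example: for degree 2 at t = 0.5 the Bernstein basis above evaluates
    # to [0.25, 0.5, 0.25], which sums to 1 as the assertion requires.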
def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(_lowerCAmelCase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(_lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
_lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 53
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__UpperCamelCase : Dict = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , torch.Tensor ):
return image
elif isinstance(lowerCamelCase , PIL.Image.Image ):
__lowercase = [image]
__lowercase = [trans(img.convert("""RGB""" ) ) for img in image]
__lowercase = torch.stack(lowerCamelCase )
return image
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
__lowercase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
if strength < 0 or strength > 1:
            raise ValueError(F'The value of strength should be in [0.0, 1.0] but is {strength}' )
def _a ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = min(int(num_inference_steps * strength ) , _lowerCAmelCase )
__lowercase = max(num_inference_steps - init_timestep , 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
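    # Worked example: num_inference_steps = 50 with strength = 0.8 gives
    # init_timestep = 40 and t_start = 10, so denoising runs only the last 40
    # scheduler steps (higher strength starts from a noisier timestep).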
def _a ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]=None ) -> Optional[int]:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowerCAmelCase )}' )
__lowercase = image.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_lowerCAmelCase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__lowercase = init_latents.shape
__lowercase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
# get latents
print("""add noise to latents at timestep""" , _lowerCAmelCase )
__lowercase = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , _lowerCAmelCase : float = 0.8 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : int = 50 , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_lowerCAmelCase )
# 2. Preprocess image
__lowercase = preprocess(_lowerCAmelCase )
# 3. set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase , device=self.device )
__lowercase , __lowercase = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
__lowercase = timesteps[:1].repeat(_lowerCAmelCase )
# 4. Prepare latent variables
__lowercase = self.prepare_latents(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.unet.dtype , self.device , _lowerCAmelCase )
__lowercase = latents
# 5. Denoising loop
for t in self.progress_bar(_lowerCAmelCase ):
# 1. predict noise model_output
__lowercase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__lowercase = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , eta=_lowerCAmelCase , use_clipped_model_output=_lowerCAmelCase , generator=_lowerCAmelCase , ).prev_sample
__lowercase = (image / 2 + 0.5).clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_lowerCAmelCase )
| 53
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
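# Note (explanatory comment, not in the original test): building the model on fully
# symbolic `tf.keras.Input` tensors, where every non-declared dimension is None, forces
# every shape-dependent branch to execute in graph mode, which is exactly what exposes
# conditionals that only work for concrete static shapes.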
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :Optional[int] = 'convnextv2'
def __init__( self : Dict , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : str=None , _lowerCAmelCase : str=None , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=1e-12 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : str , ) -> Any:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = num_channels
__lowercase = patch_size
__lowercase = num_stages
__lowercase = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__lowercase = [3, 3, 9, 3] if depths is None else depths
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = drop_path_rate
__lowercase = image_size
__lowercase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
| 53
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
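# Hedged illustration (toy data, not from any real checkpoint): the rewrite above strips
# the fairseq "@@" continuation marker and tags word-final tokens with "</w>", then
# restores the special tokens <s> <pad> </s> <unk> to their plain form, so a toy dict
# {"hel@@": 4, "lo": 5, "<s>": 0} becomes {"hel": 4, "lo</w>": 5, "<s>": 0}.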
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
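# Example invocation (script name and paths are illustrative, not taken from this file):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/fairseq_dump \
#       --pytorch_dump_folder_path /path/to/hf_model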
| 53
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Union[str, Any] = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
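# Note (explanatory comment, not part of the original file): `_LazyModule` replaces the
# package module in `sys.modules` and resolves the entries of `_import_structure` only
# when an attribute is first accessed, so importing the package stays cheap even when the
# heavy torch-backed submodules are installed.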
| 53
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
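# The two tests above check the same invariant from both directions: encoding the prompt
# (and the negative prompt) to embeddings outside the pipeline and passing them back in
# as `prompt_embeds` / `negative_prompt_embeds` must reproduce the images generated from
# the raw text inputs within a 1e-4 tolerance.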
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 1
|
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__lowercase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be non-negative""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
__lowercase = 1 - (matter_density + radiation_density + dark_energy)
__lowercase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowercase = hubble_constant * e_a ** (1 / 2)
return hubble
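# For reference, the expression above is the Friedmann equation for the Hubble parameter,
#   H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda),
# where the curvature density Omega_k = 1 - (Omega_r + Omega_m + Omega_Lambda) is derived
# from the three input densities.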
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__UpperCamelCase : int = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
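# Hedged walk-through (toy key, not from this file): with the key "MARVEL" the cipher
# alphabet starts A->M, B->A, C->R, D->V, E->E, F->L; the remaining plaintext letters are
# then mapped to the unused alphabet letters in order, with the `while char in key` loop
# skipping any letter already consumed by the keyword.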
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__UpperCamelCase : List[Any] = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__UpperCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case ( ):
'''simple docstring'''
__lowercase = """https://pypi.org/pypi/diffusers/json"""
__lowercase = json.loads(request.urlopen(lowerCamelCase ).read() )["""releases"""].keys()
return sorted(lowerCamelCase , key=lambda lowerCamelCase : version.Version(lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
__lowercase = Path(lowerCamelCase ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
init_hf_modules()
__lowercase = Path(lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
__lowercase = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
__lowercase = f.read()
# Imports of the form `import .xxx`
__lowercase = re.findall("""^\s*import\s+\.(\S+)\s*$""" , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""" , lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase ) )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = False
__lowercase = [module_file]
__lowercase = []
# Let's recurse through all relative imports
while not no_change:
__lowercase = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase ) )
__lowercase = Path(lowerCamelCase ).parent
__lowercase = [str(module_path / m ) for m in new_imports]
__lowercase = [f for f in new_import_files if f not in all_relative_imports]
__lowercase = [F'{f}.py' for f in new_import_files]
__lowercase = len(lowerCamelCase ) == 0
all_relative_imports.extend(lowerCamelCase )
return all_relative_imports
def snake_case ( lowerCamelCase ):
'''simple docstring'''
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
__lowercase = f.read()
# Imports of the form `import xxx`
__lowercase = re.findall("""^\s*import\s+(\S+)\s*$""" , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""" , lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
__lowercase = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
__lowercase = list(set(lowerCamelCase ) )
__lowercase = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase )
except ImportError:
missing_packages.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F'{", ".join(lowerCamelCase )}. Run `pip install {" ".join(lowerCamelCase )}`' )
return get_relative_imports(lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = module_path.replace(os.path.sep , """.""" )
__lowercase = importlib.import_module(lowerCamelCase )
if class_name is None:
return find_pipeline_class(lowerCamelCase )
return getattr(lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
from ..pipelines import DiffusionPipeline
__lowercase = dict(inspect.getmembers(lowerCamelCase , inspect.isclass ) )
__lowercase = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
__lowercase = cls
return pipeline_class
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ):
'''simple docstring'''
__lowercase = str(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ):
__lowercase = module_file_or_url
__lowercase = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
__lowercase = get_diffusers_versions()
# cut ".dev0"
__lowercase = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
__lowercase = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
__lowercase = F'v{revision}'
elif revision == "main":
__lowercase = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
__lowercase = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase , pipeline=lowerCamelCase )
try:
__lowercase = cached_download(
lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
__lowercase = """git"""
__lowercase = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
__lowercase = hf_hub_download(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
__lowercase = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
__lowercase = check_imports(lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
__lowercase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase )
__lowercase = Path(lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
__lowercase = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase , lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = use_auth_token
elif use_auth_token is True:
__lowercase = HfFolder.get_token()
else:
__lowercase = None
__lowercase = model_info(lowerCamelCase , revision=lowerCamelCase , token=lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__lowercase = submodule_path / commit_hash
__lowercase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase , F'{module_needed}.py' , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return os.path.join(lowerCamelCase , lowerCamelCase )
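# Summary (explanatory note, not in the original file): the function above resolves a
# module file from one of three places: a local .py path is used as-is; a bare pipeline
# name is fetched from the diffusers GitHub examples/community folder at a matching
# release tag; anything else is treated as a Hub repo and downloaded with hf_hub_download.
# The file (plus its relative imports) is then copied into the dynamic-modules cache so
# it can be imported like a regular package.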
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , **lowerCamelCase , ):
'''simple docstring'''
__lowercase = get_cached_module_file(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return get_class_in_module(lowerCamelCase , final_module.replace(""".py""" , """""" ) )
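# Hedged usage sketch, assuming the deobfuscated signature
# (pretrained_model_name_or_path, module_file, class_name=None, ...); the repo name is
# illustrative, not from this file:
#   pipeline_cls = get_class_from_dynamic_module(
#       "hf-internal-testing/diffusers-dummy-pipeline", "pipeline.py"
#   )
# With class_name left as None, the single DiffusionPipeline subclass defined in the
# downloaded file is located via find_pipeline_class and returned.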
| 53
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 1
|
import torch
def snake_case ( ):
'''simple docstring'''
if torch.cuda.is_available():
__lowercase = torch.cuda.device_count()
else:
__lowercase = 0
print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 53
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 1
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __UpperCamelCase :
__snake_case :str = PegasusConfig
__snake_case :List[Any] = {}
__snake_case :Union[str, Any] = 'gelu'
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=13 , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Union[str, Any]=32 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=40 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Optional[int]=0 , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase = prepare_pegasus_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFPegasusModel(config=_lowerCAmelCase ).get_decoder()
__lowercase = inputs_dict["""input_ids"""]
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict["""attention_mask"""][:1, :]
__lowercase = inputs_dict["""head_mask"""]
__lowercase = 1
# first forward pass
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , head_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1e-3 )
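# What the check above validates (explanatory note): running the decoder on the full
# sequence in one pass must match running it incrementally with `past_key_values` from an
# earlier pass, i.e. the key/value cache is a pure optimization and may change the
# outputs only within the 1e-3 numerical tolerance.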
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
        __lowercase = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
        __lowercase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
if head_mask is None:
__lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
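# Illustrative note (not part of the original test file): the decoder mask built above
# always marks position 0 as attendable because the decoder input starts with the
# decoder start token, whose id may coincide with pad_token_id. A minimal sketch:
#
#   decoder_input_ids = tf.constant([[0, 5, 1, 0, 0]])  # 0 is both <pad> and decoder start
#   mask = tf.concat(
#       [tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
#        tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], 0), tf.int8)],
#       axis=-1,
#   )
#   # -> [[1, 1, 1, 0, 0]]: the leading token stays visible, trailing padding is masked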
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__snake_case :Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__snake_case :Tuple = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case :int = True
__snake_case :Dict = False
__snake_case :Dict = False
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFPegasusModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase )
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
__snake_case :Any = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
__snake_case :List[str] = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__snake_case :Union[str, Any] = 'google/pegasus-xsum'
@cached_property
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _a ( self : Tuple , **_lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.translate_src_text(**_lowerCAmelCase )
assert self.expected_text == generated_words
def _a ( self : str , **_lowerCAmelCase : Tuple ) -> int:
"""simple docstring"""
__lowercase = self.tokenizer(self.src_text , **_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""tf""" )
__lowercase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowerCAmelCase , )
__lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCAmelCase )
return generated_words
@slow
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
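# Hedged usage sketch (not part of the original tests): the integration test above is
# roughly equivalent to this standalone snippet, assuming network access to the
# google/pegasus-xsum checkpoint and the upstream class name TFAutoModelForSeq2SeqLM:
#
#   from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#   tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
#   batch = tok(["PG&E stated it scheduled the blackouts ..."], padding=True, return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tok.batch_decode(ids.numpy(), skip_special_tokens=True))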
| 53
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
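# Hedged usage sketch (not part of the original module; the class and field names below
# follow the upstream transformers API this file mirrors: GlueDataTrainingArguments,
# GlueDataset, Split, and the task_name/data_dir fields referenced in the code above):
#
#   from transformers import AutoTokenizer
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())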
| 53
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'speech_to_text_2'
__snake_case :str = ['past_key_values']
__snake_case :Tuple = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Dict , _lowerCAmelCase : List[Any]=1_0000 , _lowerCAmelCase : Optional[int]=6 , _lowerCAmelCase : int=2048 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]="relu" , _lowerCAmelCase : int=256 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=1024 , **_lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = d_model
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = decoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase = max_target_positions
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
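# Hedged sketch (not part of the original file): instantiating the config above with
# its defaults and overriding a couple of values, assuming the upstream name
# Speech2Text2Config and the keyword names visible in the assignments above:
#
#   config = Speech2Text2Config(d_model=128, decoder_layers=4)
#   print(config.model_type)  # "speech_to_text_2"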
| 53
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
                'Very often, splitting large files into smaller files can prevent the tokenizer from going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word masking.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in blocks of this size for training. '
                'Defaults to the model max input length for single sentence inputs (taking into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
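# Hedged invocation sketch (not part of the original example script; the flags are the
# ones referenced in the error messages and dataclass logic above):
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base \
#       --train_data_file ./train.txt --eval_data_file ./eval.txt \
#       --line_by_line --mlm \
#       --do_train --do_eval \
#       --output_dir ./lm_out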
| 53
| 1
|
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
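# Worked example (not part of the original file): for water, with bulk modulus
# K ≈ 2.15e9 Pa and density ρ ≈ 998 kg/m³ (approximate room-temperature values),
#   (2.15e9 / 998) ** 0.5 ≈ 1.47e3 m/s,
# close to the commonly quoted ~1480 m/s speed of sound in water.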
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
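# Worked examples (not part of the original file): the function checks the generalized
# polygon inequality (the longest side must be strictly shorter than the sum of the
# rest):
#   [3, 4, 5] -> True   (5 < 3 + 4)
#   [1, 2, 3] -> False  (3 is not strictly less than 1 + 2)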
| 53
| 1
|
from __future__ import annotations
import numpy as np
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return np.maximum(0 , lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
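# Worked example (not part of the original file): this is the classic
# maximum-sum-of-non-adjacent-elements ("house robber") recurrence. For
# nums = [2, 7, 9, 3, 1] the optimum is 2 + 9 + 1 = 12, and the
# (including, excluding) pair evolves as
#   (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), returning max(12, 11) = 12.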
| 53
| 1
|
from math import isclose, sqrt
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = point_y / 4 / point_x
__lowercase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__lowercase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__lowercase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__lowercase = outgoing_gradient**2 + 4
__lowercase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__lowercase = (point_y - outgoing_gradient * point_x) ** 2 - 100
__lowercase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__lowercase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__lowercase = x_minus if isclose(lowerCamelCase , lowerCamelCase ) else x_plus
__lowercase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case ( lowerCamelCase = 1.4 , lowerCamelCase = -9.6 ):
'''simple docstring'''
__lowercase = 0
__lowercase = first_x_coord
__lowercase = first_y_coord
__lowercase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__lowercase , __lowercase , __lowercase = next_point(lowerCamelCase , lowerCamelCase , lowerCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
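# Sanity-check sketch (not part of the original): the entry point (1.4, -9.6) lies on
# the ellipse 4x^2 + y^2 = 100 (4 * 1.4**2 + (-9.6)**2 = 7.84 + 92.16 = 100), and every
# point produced by the next_point routine referenced above should stay on it up to
# floating-point error:
#   x, y, m = next_point(1.4, -9.6, (10.1 - (-9.6)) / (0.0 - 1.4))
#   assert abs(4 * x * x + y * y - 100) < 1e-6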
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
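# Hedged invocation sketch (not part of the original script; the script file name is an
# assumption, while the flags and example repo come from the argparse definitions above):
#
#   python convert_roberta_prelayernorm_original_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_out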
| 53
| 1
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.nan
for i in range(lowerCamelCase ):
__lowercase = features[:, labels == i]
__lowercase = data.mean(1 )
# Centralize the data of class i
__lowercase = data - column_reshape(lowerCamelCase )
if i > 0:
            # If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += np.dot(lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowercase = np.dot(lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = features.mean(1 )
__lowercase = np.nan
for i in range(lowerCamelCase ):
__lowercase = features[:, labels == i]
__lowercase = data.shape[1]
__lowercase = data.mean(1 )
if i > 0:
            # If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase ) , (column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
__lowercase = device_data * np.dot(
column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase ) , (column_reshape(lowerCamelCase ) - column_reshape(lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if features.any():
__lowercase = features.mean(1 )
# Center the dataset
__lowercase = features - np.reshape(lowerCamelCase , (data_mean.size, 1) )
__lowercase = np.dot(lowerCamelCase , centered_data.T ) / features.shape[1]
__lowercase , __lowercase = np.linalg.eigh(lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
__lowercase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
__lowercase = np.dot(filtered_eigenvectors.T , lowerCamelCase )
logging.info("""Principal Component Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=lowerCamelCase )
logging.error("""Dataset empty""" )
raise AssertionError
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
__lowercase , __lowercase = eigh(
covariance_between_classes(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , covariance_within_classes(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , )
__lowercase = eigenvectors[:, ::-1][:, :dimensions]
__lowercase , __lowercase , __lowercase = np.linalg.svd(lowerCamelCase )
__lowercase = svd_matrix[:, 0:dimensions]
__lowercase = np.dot(filtered_svd_matrix.T , lowerCamelCase )
logging.info("""Linear Discriminant Analysis computed""" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=lowerCamelCase )
logging.error("""Dataset empty""" )
raise AssertionError
def snake_case ( ):
'''simple docstring'''
__lowercase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__lowercase = np.array([0, 0, 0, 1, 1] )
__lowercase = 2
__lowercase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowerCamelCase ) as error_info:
__lowercase = linear_discriminant_analysis(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , np.ndarray ):
raise AssertionError(
"""Did not raise AssertionError for dimensions > classes""" )
assert error_info.type is AssertionError
def snake_case ( ):
'''simple docstring'''
__lowercase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__lowercase = 2
__lowercase = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(lowerCamelCase ) as error_info:
__lowercase = principal_component_analysis(lowerCamelCase , lowerCamelCase )
if not np.allclose(lowerCamelCase , lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
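# Hedged usage sketch (not part of the original): the helpers above expect one sample
# per column. Projecting the toy 3x3 dataset from the PCA test onto two principal
# components (assuming the upstream name principal_component_analysis):
#
#   features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#   projected = principal_component_analysis(features, 2)  # shape (2, 3)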
| 53
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
__lowercase = ksize + 1
    __lowercase = np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
    __UpperCamelCase : List[str] = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
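# Worked example (not part of the original file; argument order taken from the
# gabor_filter_kernel call above: ksize, sigma, theta, lambd, gamma, psi):
#
#   kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#   kernel.shape  # (11, 11): an even ksize is bumped to the next odd size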
| 53
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
__UpperCamelCase : Optional[int] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.getbasetemp() / """cache"""
__lowercase = test_hf_cache_home / """datasets"""
__lowercase = test_hf_cache_home / """metrics"""
__lowercase = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(lowerCamelCase ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(lowerCamelCase ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(lowerCamelCase ) )
__lowercase = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(lowerCamelCase ) )
__lowercase = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowerCamelCase ) )
@pytest.fixture(autouse=lowerCamelCase , scope="""session""" )
def snake_case ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , lowerCamelCase )
@pytest.fixture
def snake_case ( lowerCamelCase ):
'''simple docstring'''
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , lowerCamelCase )
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
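# Hedged invocation sketch (not part of the original script; the run id and token are
# placeholders, and the flags are defined in the argparse section above):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./warnings_out \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning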
| 53
| 1
|
import re
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
try:
__lowercase = split_input(lowerCamelCase )
if upper:
__lowercase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__lowercase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return to_simple_case(lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
try:
__lowercase = to_simple_case(lowerCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return to_complex_case(lowerCamelCase , lowerCamelCase , """_""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return to_complex_case(lowerCamelCase , lowerCamelCase , """-""" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 53
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__UpperCamelCase : Tuple = False
class __UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger """
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = generator.manual_seed(0 )
__lowercase = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger """
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
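# Worked example (not part of the original file; the names check_bouncy/solution follow
# the references inside the code above):
#   check_bouncy(101) -> True   ("101" is neither increasing nor decreasing)
#   check_bouncy(122) -> False  (its digits never decrease)
#   solution(50)      -> 538    (Project Euler 112 states that 538 is the least number
#                                at which exactly half the numbers are bouncy)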
| 53
| 1
|
def snake_case ( lowerCamelCase , lowerCamelCase = 0 ):
'''simple docstring'''
__lowercase = length or len(lowerCamelCase )
__lowercase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__lowercase , __lowercase = list_data[i + 1], list_data[i]
__lowercase = True
return list_data if not swapped else bubble_sort(lowerCamelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
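# Worked examples (not part of the original file; the recursive call inside the
# function refers to the upstream name bubble_sort):
#   bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]
#   bubble_sort([])              -> []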
| 53
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCamelCase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 53
| 1
|
import logging
from transformers import PretrainedConfig
__UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
__UpperCamelCase : Tuple = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'bertabs'
def __init__( self : Union[str, Any] , _lowerCAmelCase : Any=3_0522 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : str=6 , _lowerCAmelCase : Union[str, Any]=512 , _lowerCAmelCase : List[str]=8 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : Any=0.2 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : List[Any]=8 , _lowerCAmelCase : str=2048 , _lowerCAmelCase : int=0.2 , **_lowerCAmelCase : List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = vocab_size
__lowercase = max_pos
__lowercase = enc_layers
__lowercase = enc_hidden_size
__lowercase = enc_heads
__lowercase = enc_ff_size
__lowercase = enc_dropout
__lowercase = dec_layers
__lowercase = dec_hidden_size
__lowercase = dec_heads
__lowercase = dec_ff_size
__lowercase = dec_dropout
| 53
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 1
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = current_set.copy()
for row_index, row in enumerate(lowerCamelCase ):
__lowercase = row[0]
for column_index, column in enumerate(lowerCamelCase ):
if magnitude == 0:
__lowercase = column
continue
__lowercase = column / magnitude
# Subtract to cancel term
__lowercase = current_set[0]
__lowercase = [first_row]
__lowercase = current_set[1::]
for row in current_set:
__lowercase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase )
continue
for column_index in range(len(lowerCamelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__lowercase = final_set[0]
__lowercase = []
__lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__lowercase = simplify(lowerCamelCase )
for i in range(len(lowerCamelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCamelCase )
__lowercase = resultant
return final_set
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
__lowercase = len(lowerCamelCase ) + 1
if any(len(lowerCamelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCamelCase , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCamelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
__lowercase = equations.copy()
if any(0 in row for row in data_set ):
__lowercase = data_set.copy()
__lowercase = []
for row_index, row in enumerate(lowerCamelCase ):
if 0 not in row:
__lowercase = data_set.pop(lowerCamelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCamelCase )
__lowercase = data_set.copy()
__lowercase = simplify(lowerCamelCase )
__lowercase = simplified[::-1]
__lowercase = []
for row in simplified:
__lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__lowercase = row.copy()[: len(lowerCamelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase ) == 0:
solutions.append(0 )
continue
__lowercase = temp_row[1::]
__lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase )
__lowercase = []
for item in solutions:
final.append(float(round(lowerCamelCase , 5 ) ) )
return final[::-1]
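# Worked example (hand-checkable): the system
#     x + 2y = 3
#     4x + 5y = 6
# has the unique solution x = -1, y = 2, so:
# >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
# [-1.0, 2.0]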
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Union[str, Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
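# Minimal illustration of the helper above on a hypothetical dict (checkpoint
# keys are handled the same way):
# >>> d = {"old_name": 1}
# >>> rename_key(d, "old_name", "new_name")
# >>> d
# {'new_name': 1}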
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[: hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[: hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = """ade""" in model_name
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
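# Example invocation (a sketch: the script filename and paths below are
# placeholders; only the flags are defined above):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub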
| 53
| 1
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __UpperCamelCase :
__snake_case :List[str]
__snake_case :Optional[str] = None
# Automatically constructed
__snake_case :ClassVar[str] = "dict"
__snake_case :ClassVar[Any] = None
__snake_case :str = field(default='Translation' , init=_lowerCAmelCase , repr=_lowerCAmelCase )
def __call__( self : Any ) -> List[str]:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _a ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __UpperCamelCase :
__snake_case :Optional[List] = None
__snake_case :Optional[int] = None
__snake_case :Optional[str] = None
# Automatically constructed
__snake_case :ClassVar[str] = "dict"
__snake_case :ClassVar[Any] = None
__snake_case :str = field(default='TranslationVariableLanguages' , init=_lowerCAmelCase , repr=_lowerCAmelCase )
def _a ( self : Any ) -> str:
"""simple docstring"""
__lowercase = sorted(set(self.languages ) ) if self.languages else None
__lowercase = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> str:
"""simple docstring"""
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def _a ( self : Tuple , _lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = set(self.languages )
if self.languages and set(_lowerCAmelCase ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(_lowerCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_lowerCAmelCase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__lowercase = []
for lang, text in translation_dict.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__lowercase , __lowercase = zip(*sorted(_lowerCAmelCase ) )
return {"language": languages, "translation": translations}
def _a ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
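# Usage sketch for the variable-language feature above (names are
# illustrative, and the original method name encode_example is used; the
# method returns tuples because it unzips the sorted (language, text) pairs):
# >>> feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
# >>> feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}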
| 53
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
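# Quick sanity checks for the 6k +/- 1 primality test above:
# >>> is_prime(2), is_prime(3)
# (True, True)
# >>> is_prime(25)  # composite, caught at i = 5
# False
# >>> is_prime(29)
# True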
def snake_case ( lowerCamelCase = 10_001 ):
'''simple docstring'''
__lowercase = 0
__lowercase = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase ):
count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCamelCase : Any = 16
__UpperCamelCase : Tuple = 32
def snake_case ( lowerCamelCase , lowerCamelCase = 16 , lowerCamelCase = "bert-base-cased" ):
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
__lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
__lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
model.eval()
__lowercase = 0
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
__lowercase , __lowercase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase ) - 1:
__lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
__lowercase = metric.compute()
return eval_metric["accuracy"]
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
__lowercase = args.model_name_or_path
set_seed(lowerCamelCase )
__lowercase , __lowercase = get_dataloaders(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase )
# Instantiate optimizer
__lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase = optimizer_cls(params=model.parameters() , lr=lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
__lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowercase = 1
__lowercase = (len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=0 , num_training_steps=lowerCamelCase , )
else:
__lowercase = DummyScheduler(lowerCamelCase , total_num_steps=lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
__lowercase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowercase = 0
__lowercase = evaluate.load("""glue""" , """mrpc""" )
__lowercase = num_epochs
if args.partial_train_epoch is not None:
__lowercase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowercase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowercase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowercase = int(lowerCamelCase ) + 1
__lowercase = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
accelerator.print("""resumed checkpoint performance:""" , lowerCamelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizer's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowercase = json.load(lowerCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowercase = {}
for epoch in range(lowerCamelCase , lowerCamelCase ):
model.train()
for step, batch in enumerate(lowerCamelCase ):
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.loss
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowercase = F'epoch_{epoch}'
__lowercase = os.path.join(args.output_dir , lowerCamelCase )
accelerator.save_state(lowerCamelCase )
__lowercase = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = accuracy
__lowercase = lr_scheduler.get_lr()[0]
__lowercase = optimizer.param_groups[0]["""lr"""]
__lowercase = epoch
__lowercase = overall_step
accelerator.print(F'epoch {epoch}:' , lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
def snake_case ( ):
'''simple docstring'''
__lowercase = argparse.ArgumentParser(description="""Simple example of a training script that saves, resumes and verifies checkpointed training state.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowerCamelCase , default=lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowerCamelCase , default=lowerCamelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase , default=2 , help="""Number of train epochs.""" , )
__lowercase = parser.parse_args()
__lowercase = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
main()
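# Example launches (a sketch: the filename and paths are placeholders; the
# flags are the ones defined in main() above, and an accelerate/DeepSpeed
# config is assumed to be set up beforehand):
#   accelerate launch test_checkpointing.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch test_checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0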
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
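# e.g. to_atuple(7) -> (7, 7), while to_atuple((7, 5)) -> (7, 5): scalars are
# duplicated, iterables (such as image-size tuples) pass through unchanged.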
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between the two outputs is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Dict=18 , _lowerCAmelCase : str=30 , _lowerCAmelCase : Tuple=400 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]=None , ) -> str:
"""simple docstring"""
__lowercase = size if size is not None else {"""shortest_edge""": 20}
__lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = do_center_crop
__lowercase = crop_size
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :List[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MobileNetVaImageProcessingTester(self )
@property
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """crop_size""" ) )
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 53
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(_lowerCAmelCase ) - 1
def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
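        # each term is the Bernstein polynomial comb(n, i) * (1 - t)**(n - i) * t**i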
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis functions must sum to 1 for the curve to be a valid Bezier curve.
assert round(sum(_lowerCAmelCase ) , 5 ) == 1
return output_values
def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(_lowerCAmelCase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
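        # sample the curve from t = 0 to t = 1 in increments of step_size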
while t <= 1:
__lowercase = self.bezier_curve_function(_lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
_lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 53
| 1
|
import requests
__UpperCamelCase : Optional[Any] = """""" # <-- Put your OpenWeatherMap appid here!
__UpperCamelCase : Optional[int] = """https://api.openweathermap.org/data/2.5/"""
def snake_case ( lowerCamelCase = "Chicago" , lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def snake_case ( lowerCamelCase = "Kolkata, India" , lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def snake_case ( lowerCamelCase = 55.68 , lowerCamelCase = 12.57 , lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__UpperCamelCase : List[Any] = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 53
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
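            # run a forward pass and verify the number and shape of the reported hidden states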
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_script.main )
def _a ( self : int ) -> Dict:
"""simple docstring"""
debug_launcher(test_ops.main )
| 53
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
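# indentation level used when serializing the JSON vocab and config files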
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
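        # record how many of the registered symbols are special tokens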
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
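    # strip the BPE continuation marker "@@" and append "</w>" to word-final tokens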
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 53
| 1
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
def snake_case ( lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=16 , lowerCamelCase = 10 , lowerCamelCase = 2 ):
'''simple docstring'''
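    # builds train/valid dataloaders over synthetic (x, a*x + b + noise) pairs for the toy linear model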
def get_dataset(lowerCamelCase ):
__lowercase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowerCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__lowercase = get_dataset(lowerCamelCase )
__lowercase = get_dataset(lowerCamelCase )
__lowercase = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4 )
__lowercase = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
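    # run a short training loop; a random number is recorded each step so RNG state can be compared across checkpoint save/load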
__lowercase = []
for epoch in range(lowerCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
__lowercase , __lowercase = batch
__lowercase = model(lowerCamelCase )
__lowercase = torch.nn.functional.mse_loss(lowerCamelCase , lowerCamelCase )
accelerator.backward(lowerCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __UpperCamelCase ( nn.Module ):
def __init__( self : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Parameter(torch.randn(1 ) )
__lowercase = nn.Parameter(torch.randn(1 ) )
def _a ( self : Tuple , _lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return x * self.a + self.b
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase , __lowercase = dummy_dataloaders()
__lowercase = ProjectConfiguration(total_limit=1 , project_dir=_lowerCAmelCase , automatic_checkpoint_naming=_lowerCAmelCase )
# Train baseline
__lowercase = Accelerator(project_config=_lowerCAmelCase )
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase , __lowercase = dummy_dataloaders()
# Train baseline
__lowercase = Accelerator()
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save initial
__lowercase = os.path.join(_lowerCAmelCase , """initial""" )
accelerator.save_state(_lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
__lowercase = train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase , __lowercase = dummy_dataloaders()
__lowercase = Accelerator()
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
accelerator.load_state(_lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = train(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save everything
__lowercase = os.path.join(_lowerCAmelCase , """checkpoint""" )
accelerator.save_state(_lowerCAmelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowerCAmelCase )
test_rands += train(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase , __lowercase = dummy_dataloaders()
__lowercase = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase )
# Train baseline
__lowercase = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase )
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save initial
accelerator.save_state()
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
__lowercase = train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase , __lowercase = dummy_dataloaders()
__lowercase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowerCAmelCase )
__lowercase = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase )
__lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = train(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
((__lowercase) , (__lowercase)) = model.a.item(), model.b.item()
__lowercase = optimizer.state_dict()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = torch.tensor([1, 2, 3] )
__lowercase = torch.tensor([2, 3, 4] )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(net.parameters() )
__lowercase = Accelerator()
with self.assertRaises(_lowerCAmelCase ) as ve:
accelerator.register_for_checkpointing(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowercase = DummyModel()
__lowercase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__lowercase = torch.optim.lr_scheduler.StepLR(_lowerCAmelCase , step_size=1 , gamma=0.99 )
__lowercase , __lowercase = dummy_dataloaders()
__lowercase = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase )
# Train baseline
__lowercase = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save initial
accelerator.save_state()
__lowercase = scheduler.state_dict()
train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.assertNotEqual(_lowerCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(_lowerCAmelCase , scheduler.state_dict() )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowercase = DummyModel()
__lowercase = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase , total_limit=2 )
# Train baseline
__lowercase = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase )
__lowercase = accelerator.prepare(_lowerCAmelCase )
            # Save 11 checkpoints; with total_limit=2 only the two most recent should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCamelCase : Dict = """/tmp/accelerate/state_checkpointing"""
__UpperCamelCase : str = DummyModel()
__UpperCamelCase : int = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__UpperCamelCase : List[str] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCamelCase , __UpperCamelCase : Optional[int] = dummy_dataloaders()
__UpperCamelCase : str = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCamelCase : List[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCamelCase , __UpperCamelCase : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCamelCase : Optional[int] = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
__UpperCamelCase : int = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
__UpperCamelCase : Optional[Any] = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
__UpperCamelCase : str = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 53
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
        __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=30 , _lowerCAmelCase : List[Any]=400 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Union[str, Any]=0.9 , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=[0.5, 0.5, 0.5] , _lowerCAmelCase : str=[0.5, 0.5, 0.5] , ) -> List[Any]:
"""simple docstring"""
__lowercase = size if size is not None else {"""shortest_edge""": 30}
__lowercase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize_and_center_crop
__lowercase = size
__lowercase = crop_pct
__lowercase = crop_size
__lowercase = do_normalize
__lowercase = image_mean
__lowercase = image_std
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Dict = PoolFormerImageProcessor if is_vision_available() else None
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = PoolFormerImageProcessingTester(self )
@property
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """crop_pct""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
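    # Keep spaces and the first occurrence of each alphabetic character, preserving order.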
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
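    # Characters absent from the cipher map (digits, punctuation) pass through unchanged.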
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
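    # Invert the cipher map so ciphertext letters map back to plaintext letters.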
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __UpperCamelCase :
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : List[Any]=17 , _lowerCAmelCase : Optional[Any]=23 , _lowerCAmelCase : Optional[int]=11 , _lowerCAmelCase : List[str]=True , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = act_dim
__lowercase = state_dim
__lowercase = hidden_size
__lowercase = max_length
__lowercase = is_training
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowercase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowercase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__lowercase = random_attention_mask((self.batch_size, self.seq_length) )
__lowercase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _a ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = DecisionTransformerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
__snake_case :Optional[int] = ()
__snake_case :Optional[Any] = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__snake_case :Union[str, Any] = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__snake_case :str = False
__snake_case :int = False
__snake_case :List[Any] = False
__snake_case :Any = False
__snake_case :List[Any] = False
__snake_case :Any = False
__snake_case :Any = False
__snake_case :List[Any] = False
__snake_case :Optional[int] = False
def _a ( self : str ) -> Any:
"""simple docstring"""
__lowercase = DecisionTransformerModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DecisionTransformerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(_lowerCAmelCase )] , _lowerCAmelCase )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 2 # number of steps of autoregressive prediction we will perform
__lowercase = 10 # defined by the RL environment, may be normalized
__lowercase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
__lowercase = model.to(_lowerCAmelCase )
__lowercase = model.config
torch.manual_seed(0 )
__lowercase = torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.floataa ) # env.reset()
__lowercase = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=_lowerCAmelCase )
__lowercase = torch.tensor(_lowerCAmelCase , device=_lowerCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__lowercase = state
__lowercase = torch.zeros(1 , 0 , config.act_dim , device=_lowerCAmelCase , dtype=torch.floataa )
__lowercase = torch.zeros(1 , 0 , device=_lowerCAmelCase , dtype=torch.floataa )
__lowercase = torch.tensor(0 , device=_lowerCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_lowerCAmelCase ):
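            # Pad actions and rewards with zero placeholders for the step about to be predicted.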
__lowercase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_lowerCAmelCase )] , dim=1 )
__lowercase = torch.cat([rewards, torch.zeros(1 , 1 , device=_lowerCAmelCase )] , dim=1 )
__lowercase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__lowercase , __lowercase , __lowercase = model(
states=_lowerCAmelCase , actions=_lowerCAmelCase , rewards=_lowerCAmelCase , returns_to_go=_lowerCAmelCase , timesteps=_lowerCAmelCase , attention_mask=_lowerCAmelCase , return_dict=_lowerCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
__lowercase , __lowercase , __lowercase , __lowercase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
__lowercase = action_pred[0, -1]
__lowercase = torch.cat([states, state] , dim=1 )
__lowercase = returns_to_go[0, -1] - reward
__lowercase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__lowercase = torch.cat(
[timesteps, torch.ones((1, 1) , device=_lowerCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 53
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 1
|
from math import factorial
def snake_case ( lowerCamelCase = 20 ):
'''simple docstring'''
    __lowercase = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
__lowercase = n // 2
return int(factorial(lowerCamelCase ) / (factorial(lowerCamelCase ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__UpperCamelCase : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 53
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 1
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
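    # Build an output-projection linear layer whose weights are shared with the embedding matrix.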
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
__lowercase = emb.weight.data
return lin_layer
def snake_case ( lowerCamelCase , lowerCamelCase="facebook/mbart-large-en-ro" , lowerCamelCase=False , lowerCamelCase=False ):
'''simple docstring'''
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(lowerCamelCase )
__lowercase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__lowercase = MBartConfig.from_pretrained(lowerCamelCase , vocab_size=lowerCamelCase )
if mbart_aa and finetuned:
__lowercase = """relu"""
__lowercase = state_dict["""decoder.embed_tokens.weight"""]
__lowercase = MBartForConditionalGeneration(lowerCamelCase )
model.model.load_state_dict(lowerCamelCase )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Any = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 53
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
| 53
| 1
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : int=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : int=4 , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = True
__snake_case :Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : str ) -> str:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , _lowerCAmelCase )
# compare the actual values for a slice.
__lowercase = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__lowercase = model(_lowerCAmelCase )[0]
# compare the actual values for a slice.
__lowercase = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
            'Splitting large files into smaller files can often prevent the tokenizer from going out of memory.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word masking.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
            'Optional input sequence length after tokenization. '
            'The training dataset will be truncated in blocks of this size for training. '
            'Defaults to the model max input length for single sentence inputs (taking into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
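    # Build a dataset for training or evaluation, choosing line-by-line vs. contiguous-block tokenization.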
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 53
| 1
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
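    # A valid polygon exists iff the longest side is strictly shorter than the sum of the rest.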
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
# Algorithm for the pigeonhole sorting
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = min(lowerCamelCase ) # min() finds the minimum value
__lowercase = max(lowerCamelCase ) # max() finds the maximum value
__lowercase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__lowercase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowerCamelCase , lowerCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__lowercase = 0
for count in range(lowerCamelCase ):
while holes[count] > 0:
holes[count] -= 1
__lowercase = count + min_val
i += 1
def snake_case ( ):
'''simple docstring'''
__lowercase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowerCamelCase )
print("""Sorted order is:""" , """ """.join(lowerCamelCase ) )
if __name__ == "__main__":
main()
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
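        # House-robber recurrence: either take num (best sum that excluded the previous element)
        # or skip it (max of the two running sums so far).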
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = 'swin2sr'
__snake_case :str = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[Any] , _lowerCAmelCase : Any=64 , _lowerCAmelCase : str=1 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : str=180 , _lowerCAmelCase : str=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : Dict=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : List[Any]=8 , _lowerCAmelCase : Dict=2.0 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : List[Any]=1e-5 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Optional[Any]=1.0 , _lowerCAmelCase : Dict="1conv" , _lowerCAmelCase : Union[str, Any]="pixelshuffle" , **_lowerCAmelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(_lowerCAmelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = upscale
__lowercase = img_range
__lowercase = resi_connection
__lowercase = upsampler
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
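    # Instantiate the model directly from the renamed state_dict so keys and shapes are validated.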
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = False
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__UpperCamelCase : str = parser.parse_args()
__UpperCamelCase : Dict = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
__UpperCamelCase : List[str] = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__UpperCamelCase : Optional[int] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__UpperCamelCase : List[str] = reader.read()
__UpperCamelCase : Union[str, Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
__UpperCamelCase : Tuple = UNetaDModel(**config)
else:
__UpperCamelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
__UpperCamelCase : Any = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__UpperCamelCase : Dict = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__UpperCamelCase : List[str] = config[key]
del config[key]
__UpperCamelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
__UpperCamelCase : Any = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
__UpperCamelCase : Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
__UpperCamelCase : Any = {}
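    # Walk every parameter: drop unused ".op" tensors and rename top-level module prefixes.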
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
__UpperCamelCase : List[str] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
__UpperCamelCase : int = param_value
__UpperCamelCase : Dict = True
if not has_changed:
__UpperCamelCase : List[Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 53
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
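    # Gabor kernels need an odd side length so a single center pixel exists; bump even sizes.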
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
    # compute each kernel value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
            # degrees to radians
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 53
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = DanceDiffusionPipeline
__snake_case :Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__snake_case :Any = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__snake_case :Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__snake_case :List[Any] = False
__snake_case :Optional[Any] = False
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_lowerCAmelCase , use_timestep_embedding=_lowerCAmelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
__lowercase = IPNDMScheduler()
__lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def _a ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=0 ) -> int:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = DanceDiffusionPipeline(**_lowerCAmelCase )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowercase = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = torch_device
__lowercase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(generator=_lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowercase = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = torch_device
__lowercase = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
__lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(generator=_lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
__lowercase = output.audios
__lowercase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowercase = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
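# without --from_gh the directory holds downloaded artifact zips; with it, plain files are scanned directly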
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 1
|
def snake_case ( lowerCamelCase = 2_000_000 ):
'''simple docstring'''
__lowercase = [0 for i in range(n + 1 )]
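# sieve of Eratosthenes over [0, n]: 0 marks a prime candidate, 1 marks a composite
# (0 and 1 are flagged non-prime just below)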
__lowercase = 1
__lowercase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowerCamelCase ):
__lowercase = 1
__lowercase = 0
for i in range(lowerCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 53
| 1
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__UpperCamelCase : Optional[Any] = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
warnings.warn(lowerCamelCase , lowerCamelCase )
requires_backends(lowerCamelCase , """sklearn""" )
return (preds == labels).mean()
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
warnings.warn(lowerCamelCase , lowerCamelCase )
requires_backends(lowerCamelCase , """sklearn""" )
__lowercase = simple_accuracy(lowerCamelCase , lowerCamelCase )
__lowercase = fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
warnings.warn(lowerCamelCase , lowerCamelCase )
requires_backends(lowerCamelCase , """sklearn""" )
__lowercase = pearsonr(lowerCamelCase , lowerCamelCase )[0]
__lowercase = spearmanr(lowerCamelCase , lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
warnings.warn(lowerCamelCase , lowerCamelCase )
requires_backends(lowerCamelCase , """sklearn""" )
assert len(lowerCamelCase ) == len(lowerCamelCase ), F'Predictions and labels have mismatched lengths {len(lowerCamelCase )} and {len(lowerCamelCase )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCamelCase , lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(lowerCamelCase , lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCamelCase , lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(lowerCamelCase , lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
else:
raise KeyError(lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
warnings.warn(lowerCamelCase , lowerCamelCase )
requires_backends(lowerCamelCase , """sklearn""" )
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(lowerCamelCase )} and {len(lowerCamelCase )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCamelCase , lowerCamelCase )}
else:
raise KeyError(lowerCamelCase )
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 53
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :torch.FloatTensor
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : Tuple , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , _lowerCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , _lowerCAmelCase : Tuple[int] = (64,) , _lowerCAmelCase : int = 1 , _lowerCAmelCase : str = "silu" , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 32 , _lowerCAmelCase : int = 256 , _lowerCAmelCase : int = 32 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : float = 0.18_215 , _lowerCAmelCase : str = "group" , ) -> Dict:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
__lowercase = Encoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , down_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , double_z=_lowerCAmelCase , )
__lowercase = vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowercase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
__lowercase = VectorQuantizer(_lowerCAmelCase , _lowerCAmelCase , beta=0.25 , remap=_lowerCAmelCase , sane_index_shape=_lowerCAmelCase )
__lowercase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
# pass init params to Decoder
__lowercase = Decoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , up_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , norm_type=_lowerCAmelCase , )
@apply_forward_hook
def _a ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> VQEncoderOutput:
"""simple docstring"""
__lowercase = self.encoder(_lowerCAmelCase )
__lowercase = self.quant_conv(_lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCAmelCase )
@apply_forward_hook
def _a ( self : Any , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
__lowercase , __lowercase , __lowercase = self.quantize(_lowerCAmelCase )
else:
__lowercase = h
__lowercase = self.post_quant_conv(_lowerCAmelCase )
__lowercase = self.decoder(_lowerCAmelCase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def _a ( self : List[Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__lowercase = sample
__lowercase = self.encode(_lowerCAmelCase ).latents
__lowercase = self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
| 53
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
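# replicate the pipeline params across devices and shard the prompt ids so each device samples its own slice under pmap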
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 53
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : int = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
__UpperCamelCase : Optional[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
__UpperCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
__UpperCamelCase : Tuple = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
__UpperCamelCase : Union[str, Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
__UpperCamelCase : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[Any] = VOCAB_FILES_NAMES
__snake_case :Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__snake_case :Optional[int] = DPRContextEncoderTokenizer
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = VOCAB_FILES_NAMES
__snake_case :Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__snake_case :Tuple = DPRQuestionEncoderTokenizer
__UpperCamelCase : int = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__UpperCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__UpperCamelCase : str = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(_lowerCAmelCase )
class __UpperCamelCase :
def __call__( self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[bool] = None , **_lowerCAmelCase : Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
elif titles is None or texts is None:
__lowercase = titles if texts is None else texts
return super().__call__(
_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase = titles if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [titles]
__lowercase = texts if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [texts]
__lowercase = len(_lowerCAmelCase )
__lowercase = questions if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [questions] * n_passages
assert len(_lowerCAmelCase ) == len(
_lowerCAmelCase ), F'There should be as many titles as texts but got {len(_lowerCAmelCase )} titles and {len(_lowerCAmelCase )} texts.'
__lowercase = super().__call__(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )["""input_ids"""]
__lowercase = super().__call__(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )["""input_ids"""]
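# reader inputs follow [CLS] question [SEP] title [SEP] text: question+title are encoded with
# special tokens, then the bare text encoding is appended (and optionally truncated to max_length)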
__lowercase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCAmelCase , _lowerCAmelCase )
]
}
if return_attention_mask is not False:
__lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase = attention_mask
return self.pad(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : BatchEncoding , _lowerCAmelCase : DPRReaderOutput , _lowerCAmelCase : int = 16 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
__lowercase = reader_input["""input_ids"""]
__lowercase , __lowercase , __lowercase = reader_output[:3]
__lowercase = len(_lowerCAmelCase )
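# rank passages by their relevance logit, most relevant first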
__lowercase = sorted(range(_lowerCAmelCase ) , reverse=_lowerCAmelCase , key=relevance_logits.__getitem__ )
__lowercase = []
for doc_id in sorted_docs:
__lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowercase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowercase = sequence_ids.index(self.pad_token_id )
else:
__lowercase = len(_lowerCAmelCase )
__lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowerCAmelCase , top_spans=_lowerCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowerCAmelCase , start_index=_lowerCAmelCase , end_index=_lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a ( self : List[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
__lowercase = []
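# score every candidate span (start, start + length) within max_answer_length as start_logit + end_logit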
for start_index, start_score in enumerate(_lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] , reverse=_lowerCAmelCase )
__lowercase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
__lowercase = end_index - start_index + 1
assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
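# keep only non-overlapping spans: skip any candidate that contains, or is contained in, an already chosen span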
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :int = VOCAB_FILES_NAMES
__snake_case :Dict = READER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Tuple = READER_PRETRAINED_INIT_CONFIGURATION
__snake_case :List[Any] = ['input_ids', 'attention_mask']
__snake_case :Any = DPRReaderTokenizer
| 53
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
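# deliberately inconsistent heuristic: the consistent one scaled down by the global counter t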
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
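# 4-connected grid neighbours of s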
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
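# one anchor queue driven by the consistent heuristic plus n_heuristic - 1 inadmissible queues,
# all seeded with the start state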
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 1
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]=13 , _lowerCAmelCase : Tuple=30 , _lowerCAmelCase : int=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : str=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Dict=37 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : List[str]=10 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=0.6 , _lowerCAmelCase : List[Any]=None , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = ViTMAEModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = ViTMAEForPreTraining(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
__lowercase = (self.image_size // self.patch_size) ** 2
__lowercase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowercase = 1
__lowercase = ViTMAEForPreTraining(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase )
__lowercase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Tuple = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__snake_case :Optional[int] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__snake_case :Optional[int] = False
__snake_case :str = False
__snake_case :Dict = False
__snake_case :Any = False
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = ViTMAEModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
__lowercase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowercase = torch.from_numpy(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowercase = pt_noise
super().check_pt_tf_models(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs[0].cpu().numpy()
__lowercase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = model_class.from_pretrained(_lowerCAmelCase )
model.to(_lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
# Make sure we don't have nans
__lowercase = after_outputs[0].cpu().numpy()
__lowercase = 0
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : List[str] ) -> int:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ViTMAEModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : int ) -> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
np.random.seed(2 )
__lowercase = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(_lowerCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowercase = ViTMAEConfig()
__lowercase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase , noise=torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase ) )
# verify the logits
__lowercase = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_lowerCAmelCase ) , atol=1e-4 ) )
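# Shape check for the assertion above: facebook/vit-mae-base works on 224x224
# inputs with 16x16 patches, so the decoder predicts (224 // 16) ** 2 == 196
# patches, each flattened to 16 * 16 * 3 == 768 pixel values -> logits (1, 196, 768).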
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(k ): v for k, v in idalabel.items()}
return config
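# A quick usage sketch (hypothetical call, mirroring the branches above): for a
# name containing "ade" the upstream script ends up with 150 labels read from
# ade20k-id2label.json.
#
#     config = get_maskformer_config("maskformer-swin-tiny-ade")
#     # config.num_labels == 150, config.id2label maps 0..149 to ADE20k classes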
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
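# A minimal illustration of the pop-and-reassign rename above (toy dict, not a
# real checkpoint):
#
#     d = {"backbone.patch_embed.proj.weight": 1}
#     rename_key(d, "backbone.patch_embed.proj.weight", "projection.weight")
#     # d == {"projection.weight": 1}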
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
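# A sketch of the fused-QKV split performed above (dummy sizes, not from a real
# checkpoint): a (3 * dim, dim) in_proj matrix is cut into three (dim, dim)
# blocks holding query, key and value weights, in that order.
#
#     dim = 4
#     in_proj_weight = torch.randn(3 * dim, dim)
#     query = in_proj_weight[:dim, :]           # rows 0 .. dim-1
#     key = in_proj_weight[dim : dim * 2, :]    # rows dim .. 2*dim-1
#     value = in_proj_weight[-dim:, :]          # last dim rows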
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
# fmt: off
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = True if """ade""" in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
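# Example invocation (hypothetical local paths; the script name is assumed):
#
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/output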
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__UpperCamelCase : List[str] = (3, 9, -11, 0, 7, 5, 1, -1)
__UpperCamelCase : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __UpperCamelCase :
__snake_case :int
__snake_case :Node | None
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : Iterable[int] ) -> None:
"""simple docstring"""
__lowercase = None
for i in sorted(_lowerCAmelCase , reverse=_lowerCAmelCase ):
__lowercase = Node(_lowerCAmelCase , self.head )
def __iter__( self : int ) -> Iterator[int]:
"""simple docstring"""
__lowercase = self.head
while node:
yield node.data
__lowercase = node.next_node
def __len__( self : Dict ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self : int ) -> str:
"""simple docstring"""
return " -> ".join([str(_lowerCAmelCase ) for node in self] )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return SortedLinkedList(list(lowerCamelCase ) + list(lowerCamelCase ) )
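# Expected behaviour for the module-level fixtures above (worked out by hand):
# the constructor inserts items in descending order at the head, so iteration is
# ascending, and merging test_data_odd with test_data_even prints
# "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10".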
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 53
|
from math import sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case ( lowerCamelCase = 10_001 ):
'''simple docstring'''
__lowercase = 0
__lowercase = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase ):
count += 1
return number
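# Known result for the default argument (Project Euler problem 7): the 10001st
# prime is 104743, so solution() returns 104743.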
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
| 1
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
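# Bookkeeping sketch for add_symbol above (toy run): a fresh word is appended
# and gets the next free index, while a repeated word only has its count bumped.
# The constructor already registered <s>, <pad>, </s> and <unk> at indices 0-3.
#
#     d = Dictionary()
#     idx = d.add_symbol("protein")   # new symbol -> index 4
#     d.add_symbol("protein", n=2)    # existing symbol -> same index, count += 2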
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
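# A minimal illustration of the key rewriting above (toy vocabulary): BPE
# continuation markers ("@@") are stripped, every other token gets an explicit
# end-of-word marker ("</w>"), and the four special tokens are restored verbatim.
#
#     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "th@@": 4, "cat": 5})
#     # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "th": 4, "cat</w>": 5}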
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
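# Behaviour sketch for the helper above (values illustrative): scalars are
# duplicated into a pair, any iterable passes through unchanged.
#
#     to_atuple(224)         # -> (224, 224)
#     to_atuple((224, 160))  # -> (224, 160)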
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
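# Concrete numbers for a standard ViT setup (illustrative, not the tiny test
# config): image_size 224 and patch_size 16 give (224 // 16) ** 2 == 196 patches,
# so seq_len == 197 once the [CLS] token is added.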
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if isinstance(lowerCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
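    # Shape sanity of the checks above: logits_per_image is
    # (num_images, num_texts) and logits_per_text is its transpose, so one
    # image scored against two Italian captions yields a 1 x 2 similarity
    # matrix.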
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
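    # The terms appended above are the Bernstein polynomials
    # b(i, n)(t) = C(n, i) * (1 - t) ** (n - i) * t ** i; by the binomial
    # theorem they form a partition of unity on [0, 1], which is why the
    # sum-to-1 assert holds.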
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
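    # Added numeric sketch (not part of the original module): for a degree-1
    # curve the Bernstein basis at t = 0.5 is [0.5, 0.5], so the evaluated
    # point is exactly the midpoint of the two control points.
    demo_curve = BezierCurve([(1, 2), (3, 5)])
    assert demo_curve.basis_function(0.5) == [0.5, 0.5]
    assert demo_curve.bezier_curve_function(0.5) == (2.0, 3.5)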