"""simple docstring"""
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : bool , snake_case : list[int] , snake_case : float )-> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , snake_case , snake_case , snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , snake_case , snake_case , snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , snake_case , snake_case , snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , snake_case , snake_case , snake_case ) , )
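
# Worked check for the example in main() below: with leaf scores
# [90, 23, 6, 33, 21, 65, 123, 34423] and height log2(8) = 3, the depth-2
# maximizers yield (90, 33, 65, 34423), the depth-1 minimizers reduce those
# to (33, 65), and the root maximizer returns 65.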


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
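        # With the defaults above this is 16 * 16 * 3 + 2 = 770: each flattened
        # patch carries its raw pixel values per channel plus two extra entries,
        # which (as we read Pix2Struct's design) encode the patch row and column.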

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: in VQA mode the processor requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: RGBA inputs are converted to RGB, hence num_channels - 1
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
"""simple docstring"""
import re
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> bool:
'''simple docstring'''
UpperCAmelCase__ : Dict = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(snake_case , snake_case ):
return match.string == phone
return False
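
# A few illustrative checks of the pattern:
# indian_phone_validator("+918827897895")  -> True   (optional "+91" prefix)
# indian_phone_validator("9876543210")     -> True   (10 digits starting with 7/8/9)
# indian_phone_validator("1234567890")     -> False  (must start with 7, 8 or 9)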
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_lowerCAmelCase : Any = """bart"""
_lowerCAmelCase : int = True
@st.cache(allow_output_mutation=snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
if LOAD_DENSE_INDEX:
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
UpperCAmelCase__ : Tuple = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
UpperCAmelCase__ : Dict = qar_model.eval()
else:
UpperCAmelCase__ : Dict = (None, None)
if MODEL_TYPE == "bart":
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
UpperCAmelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
UpperCAmelCase__ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
UpperCAmelCase__ : Tuple = sas_model.eval()
else:
UpperCAmelCase__ : Tuple = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
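
# Note: faiss's IndexFlatIP.search returns a pair (D, I) of inner-product scores
# and integer row ids of the nearest stored vectors, so I[0] indexes straight
# back into the eli5_train split.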


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # relies on the module-level support_list built in the button handler below
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCAmelCase : Dict = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCAmelCase : Union[str, Any] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
_lowerCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCAmelCase : Union[str, Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_lowerCAmelCase : Any = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCAmelCase : Union[str, Any] = """wiki40b"""
_lowerCAmelCase : Any = """dense"""
_lowerCAmelCase : Tuple = """beam"""
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Any = 64
_lowerCAmelCase : Optional[int] = 256
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Dict = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCAmelCase : str = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCAmelCase : Tuple = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_lowerCAmelCase : List[str] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_lowerCAmelCase : int = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCAmelCase : str = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCAmelCase : int = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCAmelCase : Dict = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowerCAmelCase : Optional[int] = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_lowerCAmelCase : int = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_lowerCAmelCase : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowerCAmelCase : int = support_list[:10]
_lowerCAmelCase : Any = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_lowerCAmelCase : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowerCAmelCase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCAmelCase : List[Any] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCAmelCase : int = res[1].strip()
if sec_titles == "":
_lowerCAmelCase : int = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCAmelCase : Any = sec_titles.split(""" & """)
_lowerCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_lowerCAmelCase : int = find_nearest_training(question)
_lowerCAmelCase : Optional[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_lowerCAmelCase : str = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCAmelCase : Union[str, Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''megatron-bert'''
def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Optional[Any] = position_embedding_type
UpperCAmelCase__ : Any = use_cache
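
# Minimal usage sketch (MegatronBertConfig and MegatronBertModel are the public
# transformers names; the defaults above correspond to a BERT-large-sized geometry):
# from transformers import MegatronBertConfig, MegatronBertModel
# config = MegatronBertConfig(vocab_size=29056, hidden_size=1024)
# model = MegatronBertModel(config)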
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )
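
    # The UperNetConfig above wraps a small randomly initialized ConvNeXt backbone:
    # its "stage2"-"stage4" feature maps feed the UPerNet decode head (pool_scales)
    # and the auxiliary FCN head configured by the auxiliary_* arguments.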

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image from the ADE20k dataset
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
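
# Note on the Beam idiom above: `pipeline | "Load Examples" >> beam.Create(examples)`
# applies the labeled transform to the pipeline, turning the in-memory example list
# into a PCollection that the builder then materializes as Arrow/Parquet shards.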


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as a Markdown link list."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """For a given task guide, check the model list in the generated tip and overwrite it if needed."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
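    # Example invocation (illustrative; the script filename and paths are
    # hypothetical, the flags are the ones defined above):
    #   python convert_controlnet_checkpoint.py \
    #       --checkpoint_path ./control_sd15_canny.pth \
    #       --original_config_file ./cldm_v15.yaml \
    #       --dump_path ./controlnet-canny \
    #       --to_safetensors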
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list): [min, max] range to sample the short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
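# Minimal usage sketch (illustrative): resize one random uint8 HWC image so
# its short edge lands in [320, 400]; the helper name and values are
# hypothetical, chosen only to demonstrate the class above.
def _resize_demo():
    rng_img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    aug = ResizeShortestEdge([320, 400], max_size=1333)
    out = aug([rng_img])[0]
    print(rng_img.shape, "->", out.shape)  # short edge now within [320, 400]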
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
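# Minimal usage sketch (hypothetical values): scale boxes back to the input
# resolution with per-axis (y, x) factors, then clip them in place to the
# image bounds given as (height, width).
if __name__ == "__main__":
    demo_boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    demo_scales = torch.tensor([[0.5, 0.5]])  # (scale_y, scale_x)
    _scale_box(demo_boxes, demo_scales)
    _clip_box(demo_boxes, (240, 320))  # clamps x into [0, 320], y into [0, 240]
    print(demo_boxes)  # tensor([[  5.,  10., 320., 240.]])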
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
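# Usage sketch (illustrative; requires an interactive terminal, and the
# choice strings here are hypothetical):
if __name__ == "__main__":
    menu = BulletMenu("Which framework?", ["pytorch", "tensorflow"])
    print(f"Selected index: {menu.run(default_choice=0)}")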
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
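# Sketch of the weight tying performed above (hypothetical sizes): the linear
# head returned by make_linear_from_emb shares its parameter storage with the
# embedding, so logits are computed against the same matrix.
def _tied_head_demo():
    emb = nn.Embedding(100, 16)
    head = make_linear_from_emb(emb)
    assert head.weight.data_ptr() == emb.weight.data_ptr()  # same storage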
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
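# Minimal usage sketch (illustrative values, not recommended settings):
# instantiate the config defined above and override a single field.
if __name__ == "__main__":
    config = EfficientFormerConfig(num_meta3d_blocks=2)
    print(config.hidden_sizes, config.num_meta3d_blocks)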
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def __a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
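# Quick illustration (not part of the original script): punctuation in the
# ignore set is stripped and text is lower-cased before WER/CER are computed.
def _normalize_demo():
    assert normalize_text("Hello, World!") == "hello world"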
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
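    # Example invocation (illustrative; the ids are hypothetical placeholders
    # for real Hub model/dataset identifiers):
    #   python eval.py --model_id <hub-model-id> \
    #       --dataset <hub-dataset-id> --config en --split test --log_outputs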
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None):
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node):
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
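# Postorder sketch (illustrative helper, not part of the original module):
# children are visited before their parent, so a root of 2 with children
# 1 and 3 yields the order [1, 3, 2].
def _postorder_demo() -> list[int]:
    tree = BinarySearchTree()
    tree.insert(2, 1, 3)
    return [node.value for node in postorder(tree.root)]  # [1, 3, 2]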
def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 298
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
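# Added note: `_LazyModule` defers the actual `torch` / `tokenizers` imports until an
# attribute listed in `_import_structure` is first accessed, so importing the package
# stays cheap even when the optional backends are not installed.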
| 352
|
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( worda : str , wordb : str )-> int:
    '''simple docstring'''
    len_worda : int = len(worda )
    len_wordb : int = len(wordb )
    @functools.cache
    def min_distance(indexa : int , indexb : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff : int = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
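# A minimal usage sketch (added for illustration): the memoized recursion above is the
# classic Levenshtein edit distance, costing O(len(worda) * len(wordb)) states per call.
#
#     assert SCREAMING_SNAKE_CASE__("kitten", "sitting") == 3
#     assert SCREAMING_SNAKE_CASE__("", "abc") == 3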
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , snake_case__ : Any , snake_case__ : Any=1_3 , snake_case__ : Dict=7 , snake_case__ : Dict=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : List[str]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : List[Any]=2 , snake_case__ : int=4 , snake_case__ : Any=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=3 , snake_case__ : List[Any]=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Union[str, Any] = 1_3
UpperCAmelCase__ : str = 7
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Dict = 9_9
UpperCAmelCase__ : Any = 3_8_4
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Union[str, Any] = 4
UpperCAmelCase__ : Any = 3_7
UpperCAmelCase__ : List[Any] = "gelu"
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Union[str, Any] = 0.1
UpperCAmelCase__ : List[Any] = 5_1_2
UpperCAmelCase__ : int = 1_6
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : Optional[Any] = 0.02
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Optional[Any] = 1_2_8
UpperCAmelCase__ : List[Any] = 2
UpperCAmelCase__ : Any = 9
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : Any = None
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : str , snake_case__ : Any , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertModel(config=snake_case__ )
UpperCAmelCase__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ : int = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(snake_case__ )
UpperCAmelCase__ : Any = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFConvBertForMaskedLM(config=snake_case__ )
UpperCAmelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Dict = TFConvBertForSequenceClassification(config=snake_case__ )
UpperCAmelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Any , snake_case__ : Any , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_choices
UpperCAmelCase__ : Union[str, Any] = TFConvBertForMultipleChoice(config=snake_case__ )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[int] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Any = TFConvBertForTokenClassification(config=snake_case__ )
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = TFConvBertForQuestionAnswering(config=snake_case__ )
UpperCAmelCase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = TFConvBertModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : str = True
if hasattr(snake_case__ , "use_cache" ):
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Tuple = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Union[str, Any] = getattr(self.model_tester , "key_length" , snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase__ : Any = model_class(snake_case__ )
UpperCAmelCase__ : Dict = len(model(snake_case__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ , saved_model=snake_case__ )
UpperCAmelCase__ : Dict = os.path.join(snake_case__ , "saved_model" , "1" )
UpperCAmelCase__ : Union[str, Any] = tf.keras.models.load_model(snake_case__ )
UpperCAmelCase__ : List[Any] = model(snake_case__ )
if self.is_encoder_decoder:
UpperCAmelCase__ : Any = outputs["encoder_hidden_states"]
UpperCAmelCase__ : Dict = outputs["encoder_attentions"]
else:
UpperCAmelCase__ : int = outputs["hidden_states"]
UpperCAmelCase__ : int = outputs["attentions"]
self.assertEqual(len(snake_case__ ) , snake_case__ )
UpperCAmelCase__ : Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Any = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : List[Any] = getattr(self.model_tester , "key_length" , snake_case__ )
UpperCAmelCase__ : List[str] = getattr(self.model_tester , "key_length" , snake_case__ )
def check_decoder_attentions_output(snake_case__ : int ):
UpperCAmelCase__ : List[str] = len(snake_case__ )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ : Tuple = outputs.decoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case__ : Any ):
UpperCAmelCase__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = model_class(snake_case__ )
UpperCAmelCase__ : Dict = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : Union[str, Any] = len(snake_case__ )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
if self.is_encoder_decoder:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : Dict = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_decoder_attentions_output(snake_case__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Any = model_class(snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
# Check attention is always last and order is fine
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : str = model_class(snake_case__ )
UpperCAmelCase__ : int = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case__ ) )
self.assertEqual(model.config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase__ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Tuple = model(snake_case__ )[0]
UpperCAmelCase__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : str = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
| 353
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase__ : Tuple = input_file.read()
UpperCAmelCase__ : Tuple = regexp.search(snake_case__ )
return match
def __a ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase__ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ : int = regexp.finditer(snake_case__ )
UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path("./datasets" )
UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case__ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("./datasets" )
UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case__ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
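# Added illustration: the `open(` regex above flags bare calls such as `open(path)` while
# letting `open(path, encoding="utf-8")` and binary modes (rb, wb, ab) pass via the
# negative lookahead; the print regex likewise skips `print(` occurrences inside
# comments, string literals and docstrings before capturing a real call.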
| 298
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =0
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =3.0
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCAmelCase__ : Optional[int] = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCAmelCase__ : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
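# Added note: `DistributedDataParallelKwargs` is forwarded by Accelerator straight to
# `torch.nn.parallel.DistributedDataParallel`, which is why the checks above read the
# wrapped model's `bucket_bytes_cap` (stored in bytes, hence the division by
# 1_024 * 1_024) and `find_unused_parameters` attributes.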
| 354
|
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis distance.
The Mahalanobis distance is the distance between a point and a distribution, not between
two distinct points, making it effectively a multivariate generalization of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
 mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute( self , X , reference_distribution ):
        '''simple docstring'''
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 298
| 0
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase__ : Tuple = input_file.read()
UpperCAmelCase__ : Tuple = regexp.search(snake_case__ )
return match
def __a ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase__ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ : int = regexp.finditer(snake_case__ )
UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path("./datasets" )
UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case__ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("./datasets" )
UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case__ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
| 355
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =IFPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
# if
        UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.float16 )
        UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
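# Added note: clearing the cache and resetting the CUDA peak-memory counters before each
# stage makes the `torch.cuda.max_memory_allocated()` assertions above measure only the
# stage that just ran.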
| 298
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase : int = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
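# Added illustration: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}, i.e.
# the set of adjacent symbol bigrams that the BPE merge loop below repeatedly collapses.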
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs ):
        '''simple docstring'''
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
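        # Added illustration: for a sequence pair this builds the RoBERTa-style layout
        # `<s> A </s> </s> B </s>`; the single-sequence branch above yields `<s> A </s>`.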
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def __a ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def __a ( self : Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
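        # Added sketch: with self.bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}, the loop
        # above merges ("l", "o", "w</w>") into ("lo", "w</w>") and then ("low</w>",);
        # the "@@ ".join plus the trailing word[:-4] slice then yield "low".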
    def _tokenize( self , text ):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
def __a ( self : Dict , snake_case__ : List[str] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def __a ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 298
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : List[Any] , snake_case : Optional[Any] )-> int:
'''simple docstring'''
UpperCAmelCase__ : int = state_dict.pop(snake_case )
UpperCAmelCase__ : str = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> int:
'''simple docstring'''
UpperCAmelCase__ : Dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
UpperCAmelCase__ : Dict = value
else:
UpperCAmelCase__ : str = value
return new_state_dict
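# Illustrative rename (hypothetical checkpoint key): the replace call above maps
# "backbone.0.body.layer1.0.conv1.weight" to
# "backbone.conv_encoder.model.layer1.0.conv1.weight"; all other keys pass through unchanged.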
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : List[str]=False )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Dict = ""
if is_panoptic:
UpperCAmelCase__ : List[Any] = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase__ : List[str] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
UpperCAmelCase__ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Optional[Any] = in_proj_weight[:256, :]
UpperCAmelCase__ : List[str] = in_proj_bias[:256]
UpperCAmelCase__ : int = in_proj_weight[256:512, :]
UpperCAmelCase__ : Any = in_proj_bias[256:512]
UpperCAmelCase__ : List[str] = in_proj_weight[-256:, :]
UpperCAmelCase__ : List[Any] = in_proj_bias[-256:]
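        # Sketch of the slicing above, assuming the 256-dim hidden size of conditional DETR:
        # in_proj_weight is the packed (768, 256) matrix of PyTorch's MultiheadAttention,
        # so rows [0:256] become the query projection, rows [256:512] the key projection,
        # and rows [512:768] (written [-256:] here) the value projection.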
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__ : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : List[Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Any = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase__ : Optional[Any] = "resnet101"
if "dc5" in model_name:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Tuple = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase__ : Any = 250
else:
UpperCAmelCase__ : str = 91
UpperCAmelCase__ : Optional[int] = "huggingface/label-files"
UpperCAmelCase__ : Tuple = "coco-detection-id2label.json"
UpperCAmelCase__ : Tuple = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase__ : Dict = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Optional[int] = idalabel
UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase__ : str = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase__ : Tuple = ConditionalDetrImageProcessor(format=snake_case )
# prepare image
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case , return_tensors="pt" )
UpperCAmelCase__ : Optional[int] = encoding["pixel_values"]
logger.info(f'Converting model {model_name}...' )
# load original model from torch hub
UpperCAmelCase__ : List[str] = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case , pretrained=snake_case ).eval()
UpperCAmelCase__ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase__ : Optional[Any] = "conditional_detr." + src
rename_key(snake_case , snake_case , snake_case )
UpperCAmelCase__ : Tuple = rename_backbone_keys(snake_case )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case , is_panoptic=snake_case )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase__ : List[str] = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(snake_case )
UpperCAmelCase__ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase__ : str = state_dict.pop(snake_case )
UpperCAmelCase__ : List[str] = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase__ : List[str] = state_dict.pop(snake_case )
UpperCAmelCase__ : List[str] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase__ : Optional[Any] = state_dict.pop(snake_case )
UpperCAmelCase__ : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase__ : str = ConditionalDetrForSegmentation(snake_case ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case )
model.load_state_dict(snake_case )
model.eval()
model.push_to_hub(repo_id=snake_case , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase__ : List[str] = conditional_detr(snake_case )
UpperCAmelCase__ : Union[str, Any] = model(snake_case )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(snake_case ).mkdir(exist_ok=snake_case )
model.save_pretrained(snake_case )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
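    # A minimal invocation sketch (the script file name below is hypothetical):
    #   python convert_conditional_detr_checkpoint.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional_detr_resnet50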
| 357
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
# setable values
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =None
@classmethod
def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
'''simple docstring'''
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ =42
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dtype
def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCAmelCase__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
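        # Worked example, assuming the default num_train_timesteps of 1000: with
        # num_inference_steps = 50 the step ratio is 20, so the timesteps above are
        # the descending grid [980, 960, ..., 20, 0].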
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, '
                "or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
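        # At t == 0 the jnp.where above takes the zero branch, so no noise is added on
        # the final denoising step and the predicted mean is returned unchanged.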
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
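# Sanity examples for the 6k +/- 1 trial division above: is_prime(29) is True
# (29 = 6 * 5 - 1 and the loop finds no divisor), while is_prime(35) is False
# because the first loop iteration (i = 5) finds 35 % 5 == 0.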
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> list[int]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = str(snake_case )
UpperCAmelCase__ : Union[str, Any] = [n]
for i in range(1 , len(snake_case ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
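# Worked example: list_truncated_nums(3797) returns [3797, 797, 379, 97, 37, 7, 3],
# i.e. the left-truncations str_num[i:] and right-truncations str_num[:-i]
# interleaved after the number itself.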
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> bool:
'''simple docstring'''
if len(str(snake_case ) ) > 3:
if not is_prime(int(str(snake_case )[-3:] ) ) or not is_prime(int(str(snake_case )[:3] ) ):
return False
return True
def SCREAMING_SNAKE_CASE__ ( snake_case : int = 11 )-> list[int]:
'''simple docstring'''
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : Dict = 13
while len(snake_case ) != count:
if validate(snake_case ):
UpperCAmelCase__ : Any = list_truncated_nums(snake_case )
            if all(is_prime(i ) for i in list_nums ):
list_truncated_primes.append(snake_case )
num += 2
return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( )-> int:
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 358
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
SCREAMING_SNAKE_CASE_ =False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 298
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger("""transformers.models.speecht5""")
_lowerCAmelCase : Any = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
_lowerCAmelCase : List[str] = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
_lowerCAmelCase : Dict = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
_lowerCAmelCase : str = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
_lowerCAmelCase : Tuple = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
_lowerCAmelCase : List[str] = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
_lowerCAmelCase : Optional[Any] = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
_lowerCAmelCase : Optional[Any] = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
_lowerCAmelCase : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_lowerCAmelCase : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Dict = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Any = []
_lowerCAmelCase : Tuple = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
_lowerCAmelCase : Dict = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
_lowerCAmelCase : int = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
_lowerCAmelCase : int = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Optional[Any] , snake_case : str , snake_case : Any , snake_case : Optional[int] )-> Dict:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : List[str] = getattr(snake_case , snake_case )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(snake_case , snake_case ).shape
else:
UpperCAmelCase__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
            f'Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "running_mean":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "running_var":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple )-> Tuple:
'''simple docstring'''
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ : str = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
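# For illustration, using the S2T ignore list above: a checkpoint name such as
# "text_encoder_prenet.encoder_prenet.0.weight" is ignored via the "text_encoder_prenet.*"
# startswith branch, while the bare "encoder.proj" entry ignores any name containing it.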
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
if task == "s2t":
UpperCAmelCase__ : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Optional[int] = MAPPING_S2T
UpperCAmelCase__ : Optional[int] = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Union[str, Any] = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : str = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case , snake_case ):
logger.info(f'{name} was ignored' )
continue
UpperCAmelCase__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ : Optional[Any] = key.split(".*." )
if prefix in name and suffix in name:
UpperCAmelCase__ : Optional[int] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
                    UpperCAmelCase__ : str = name.split(key )[0].split("." )[-2]
UpperCAmelCase__ : str = mapped_key.replace("*" , snake_case )
if "weight_g" in name:
UpperCAmelCase__ : Optional[int] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ : Optional[Any] = "weight_v"
elif "bias" in name:
UpperCAmelCase__ : Any = "bias"
elif "weight" in name:
UpperCAmelCase__ : List[Any] = "weight"
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = "running_mean"
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[int] = "num_batches_tracked"
else:
UpperCAmelCase__ : List[Any] = None
set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case )
continue
if not is_used:
unused_weights.append(snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : Any , snake_case : str )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : Dict = name.split("." )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : List[Any] = int(items[1] )
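    # e.g. a fairseq name ending in "conv_layers.0.0.weight" (hypothetical) yields
    # layer_id = 0 and type_id = 0, which the branches below route to that layer's conv weight.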
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
UpperCAmelCase__ : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
UpperCAmelCase__ : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
UpperCAmelCase__ : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
UpperCAmelCase__ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Dict , snake_case : int , snake_case : Union[str, Any]=None , snake_case : Dict=None , snake_case : Tuple=None , )-> Optional[Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Union[str, Any] = SpeechTaConfig.from_pretrained(snake_case )
else:
UpperCAmelCase__ : List[Any] = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : List[Any] = config.max_text_positions
UpperCAmelCase__ : str = SpeechTaForSpeechToText(snake_case )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 1876
UpperCAmelCase__ : Union[str, Any] = 600
UpperCAmelCase__ : Any = config.max_speech_positions
UpperCAmelCase__ : Optional[int] = SpeechTaForTextToSpeech(snake_case )
elif task == "s2s":
UpperCAmelCase__ : List[str] = 1876
UpperCAmelCase__ : Dict = config.max_speech_positions
UpperCAmelCase__ : Union[str, Any] = SpeechTaForSpeechToSpeech(snake_case )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
UpperCAmelCase__ : int = SpeechTaTokenizer(snake_case , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : List[Any] = AddedToken("<mask>" , lstrip=snake_case , rstrip=snake_case )
UpperCAmelCase__ : Union[str, Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
UpperCAmelCase__ : Optional[int] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : int = SpeechTaProcessor(tokenizer=snake_case , feature_extractor=snake_case )
processor.save_pretrained(snake_case )
UpperCAmelCase__ : Any = torch.load(snake_case )
recursively_load_weights(fairseq_checkpoint["model"] , snake_case , snake_case )
model.save_pretrained(snake_case )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case )
model.push_to_hub(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_lowerCAmelCase : int = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 359
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 298
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =BioGptTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Union[str, Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Optional[int] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
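        # With this toy vocab and merge table, BPE splits "lower" into ["low", "er</w>"]:
        # "l o" merges to "lo", then "lo w" to "low", then "e r</w>" to "er</w>".
        # Their vocab ids are 14 and 15 (with "<unk>" at 20), the values asserted below.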
def __a ( self : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = "lower newer"
UpperCAmelCase__ : List[Any] = "lower newer"
return input_text, output_text
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : str = "lower"
UpperCAmelCase__ : Tuple = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : str = tokens + ["<unk>"]
UpperCAmelCase__ : Tuple = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCAmelCase__ : Dict = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : int = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 360
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase : Optional[int] = get_logger(__name__)
_lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md"""
_lowerCAmelCase : Dict = uuida().hex
_lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
return ua
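# Hypothetical output of the function above (all versions shown are placeholders):
# calling it with {"pipeline": "text2image"} and telemetry enabled could return
# "diffusers/0.14.0; python/3.10.6; session_id/abc123; torch/2.0.0; pipeline/text2image".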
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]:
'''simple docstring'''
if token is None:
UpperCAmelCase__ : Optional[Any] = HfFolder.get_token()
if organization is None:
UpperCAmelCase__ : Tuple = whoami(snake_case )["name"]
return f'{username}/{model_id}'
else:
return f'{organization}/{model_id}'
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None
UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case )
UpperCAmelCase__ : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None
) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" )
model_card.save(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() )
UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case )
if search is None:
return None
UpperCAmelCase__ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None
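# Illustration (hypothetical path): a resolved file such as
# "~/.cache/huggingface/hub/models--x--y/snapshots/<40-char-hex>/unet/config.json"
# yields the snapshot folder name as the commit hash, provided it matches REGEX_COMMIT_HASH.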
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase : Dict = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
_lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""")
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None:
'''simple docstring'''
if new_cache_dir is None:
UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCAmelCase__ : str = old_diffusers_cache
UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser()
UpperCAmelCase__ : Any = Path(snake_case ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case )
new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
os.replace(snake_case , snake_case )
try:
os.symlink(snake_case , snake_case )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
_lowerCAmelCase : Any = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase : List[str] = int(f.read())
except ValueError:
_lowerCAmelCase : Optional[int] = 0
if cache_version < 1:
_lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str:
'''simple docstring'''
if variant is not None:
UpperCAmelCase__ : int = weights_name.split("." )
UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
UpperCAmelCase__ : Optional[int] = ".".join(snake_case )
return weights_name
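# Worked example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin", while variant=None leaves the name unchanged.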
def SCREAMING_SNAKE_CASE__ ( pretrained_model_name_or_path , *, weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , )-> str:
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`." )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
                "this model name. Check the model page at "
                f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
        except EntryNotFoundError:
            raise EnvironmentError(
                f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
        except HTTPError as err:
            raise EnvironmentError(
                f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
        except ValueError:
            raise EnvironmentError(
                f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
                f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
                f' directory containing a file named {weights_name} or'
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
        except EnvironmentError:
            raise EnvironmentError(
                f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
                f'containing a file named {weights_name}' )
| 298
| 0
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    # whatever relative density is left over is attributed to curvature
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    # Friedmann equation: E(z)^2 = Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda
    e_a = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    # H(z) = H0 * E(z)
    hubble = hubble_constant * e_a ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 361
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss
UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy()
UpperCAmelCase__ : List[Any] = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 298
| 0
|
"""simple docstring"""
from math import ceil
def solution(n: int = 1001 )-> int:
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        # the i-th ring has side length odd = 2*i + 1; its four corners are
        # odd**2, odd**2 - even, odd**2 - 2*even, odd**2 - 3*even with even = 2*i,
        # so each ring adds 4 * odd**2 - 6 * even to the diagonal sum
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 362
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
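        # deliberately tiny model dimensions so the unit tests below stay fast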
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
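        # assemble an AlbertConfig from the tester's hyperparameters; num_hidden_groups controls ALBERT's cross-layer parameter sharing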
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
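        # pretraining couples a masked-LM head with a sentence-order-prediction (SOP) head, hence the two logit tensors checked below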
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
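        # tile each tensor num_choices times so the encoder scores every candidate answer independently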
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
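                # pretraining models expect token-level labels plus a per-example sentence-order label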
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 298
| 0
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : Any = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mask2former'''
SCREAMING_SNAKE_CASE_ =['''swin''']
SCREAMING_SNAKE_CASE_ ={'''hidden_size''': '''hidden_dim'''}
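    # the generic `hidden_size` kwarg is transparently mapped onto Mask2Former's `hidden_dim`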
    def __init__( self , backbone_config: Optional[Dict] = None , feature_size: int = 2_5_6 , mask_feature_size: int = 2_5_6 , hidden_dim: int = 2_5_6 , encoder_feedforward_dim: int = 1_0_2_4 , activation_function: str = "relu" , encoder_layers: int = 6 , decoder_layers: int = 1_0 , num_attention_heads: int = 8 , dropout: float = 0.0 , dim_feedforward: int = 2_0_4_8 , pre_norm: bool = False , enforce_input_projection: bool = False , common_stride: int = 4 , ignore_value: int = 2_5_5 , num_queries: int = 1_0_0 , no_object_weight: float = 0.1 , class_weight: float = 2.0 , mask_weight: float = 5.0 , dice_weight: float = 5.0 , train_num_points: int = 1_2_5_4_4 , oversample_ratio: float = 3.0 , importance_sample_ratio: float = 0.75 , init_std: float = 0.02 , init_xavier_std: float = 1.0 , use_auxiliary_loss: bool = True , feature_strides: List[int] = [4, 8, 1_6, 3_2] , output_auxiliary_logits: bool = None , **kwargs , ):
        '''simple docstring'''
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            # fall back to a Swin backbone config (these defaults follow the reference Swin setup)
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
@classmethod
    def __a ( cls , backbone_config: PretrainedConfig , **kwargs ):
        '''simple docstring'''
        return cls(
            backbone_config=backbone_config , **kwargs , )
def __a ( self : int ):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
| 363
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( n: int , k: int )-> list:
    '''simple docstring'''
    # factorial number system: each digit of k selects the next element of the k-th permutation
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 0
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
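# Dataset over CNN/DailyMail story files: each file holds article lines followed by "@highlight"-prefixed summary lines.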
class lowerCAmelCase__ ( Dataset ):
    def __init__( self , path="" , prefix="train" ):
        '''simple docstring'''
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
    def __len__( self ):
        '''simple docstring'''
        return len(self.documents )
    def __getitem__( self , idx ):
        '''simple docstring'''
        document_path = self.documents[idx]
        document_name = document_path.split("/" )[-1]
        with open(document_path , encoding="utf-8" ) as source:
            raw_story = source.read()
            story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story(raw_story ):
    '''simple docstring'''
    nonempty_lines = list(filter(lambda x: len(x ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight" ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period(line ):
    '''simple docstring'''
    END_TOKENS = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
    if line.startswith("@highlight" ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def SCREAMING_SNAKE_CASE__ ( sequence , block_size , pad_token_id ):
    '''simple docstring'''
    # truncate long sequences, pad short ones to exactly block_size tokens
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def SCREAMING_SNAKE_CASE__ ( sequence , pad_token_id ):
    '''simple docstring'''
    # attention mask: 1 for real tokens, 0 for padding
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def SCREAMING_SNAKE_CASE__ ( story_lines , summary_lines , tokenizer ):
    '''simple docstring'''
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def SCREAMING_SNAKE_CASE__ ( batch , separator_token_id ):
    '''simple docstring'''
    # alternate 0/1 segment ids, flipping at every separator token
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 364
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
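        # each flattened patch stores patch_h * patch_w * num_channels pixel values plus 2 slots for its (row, col) position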
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase__ : Optional[int] = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
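        # with do_convert_rgb, 4-channel RGBA inputs are reduced to 3 channels, hence num_channels - 1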
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 298
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        # keras-nlp's BytePairTokenizer runs inside the TF graph, so this layer can be saved and served end to end
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def __a ( cls , tokenizer: GPTaTokenizer , *args , **kwargs ):
        '''simple docstring'''
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def __a ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def __a ( cls , config ):
        '''simple docstring'''
        return cls(**config )
def __a ( self : Any ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def __a ( self , x , max_length: int = None ):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 365
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( mockfs )-> None:
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs )
    assert is_remote is True
    fs = fsspec.filesystem("file" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( compression_fs_class , gz_file , bza_file , lza_file , zstd_file , xz_file , text_file )-> None:
    '''simple docstring'''
    # map each compression protocol to its fixture file; a missing fixture means the optional dependency is absent
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f'for \'{compression_fs_class.protocol}\' compression protocol, '
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(expected_filename , "r" , encoding="utf-8" ) as f, open(text_file , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( protocol , zip_jsonl_path , jsonl_gz_path )-> None:
    '''simple docstring'''
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f'{protocol}://{member_file_path}::{compressed_file_path}'
    fs , *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( hf_token , hf_api , hf_private_dataset_repo_txt_data , text_file )-> None:
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(text_file ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> None:
    '''simple docstring'''
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f'A filesystem protocol was already set for {protocol} and will be overwritten.'
    )
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int )-> int:
    # plain linear scan used once the search window shrinks below `precision`
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int )-> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int )-> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(F"""Iterative search: {target} found at position: {result_ite}""")
        print(F"""Recursive search: {target} found at position: {result_rec}""")
    else:
        print("""Not found""")
| 366
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''megatron-bert'''
    def __init__( self , vocab_size=2_9_0_5_6 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        # BERT-style hyperparameters; the defaults reflect the larger Megatron-BERT scale
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 298
| 0
|
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
return args.f
def get_results(output_dir )-> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'can\'t find {path}' )
    return results
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( __magic_name__ ):
@classmethod
def __a ( cls : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ : List[str] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __a ( cls : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : int = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : int = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Optional[Any] = get_results(snake_case__ )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : str = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Any = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[Any] = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Tuple = get_results(snake_case__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : int = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Dict = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : str = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Tuple = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : List[str] = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "translation_no_trainer" ) ) )
@slow
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = logging.StreamHandler(sys.stdout )
logger.addHandler(snake_case__ )
UpperCAmelCase__ : Optional[int] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Optional[int] = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : List[str] = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Optional[Any] = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
        # The untrained base model scores 25%; after training we expect at least 60% accuracy.
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "image_classification_no_trainer" ) ) )
| 367
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
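# Minimal Beam-based builders used to exercise the datasets library's Apache Beam integration end to end.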
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , )
def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self : Any , snake_case__ : str , snake_case__ : str ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
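# Fixture generators: three flat "content" examples and three nested "a.b" examples for the builders above.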
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCAmelCase__ ( __magic_name__ ):
@require_beam
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : Dict ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 298
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : int )-> float:
'''simple docstring'''
UpperCAmelCase__ : str = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
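    # S_n = n / 2 * (2 * a + (n - 1) * d), with first term a and common difference d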
return total
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =XLMTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = "lower newer"
UpperCAmelCase__ : Optional[Any] = "lower newer"
return input_text, output_text
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[Any] = "lower"
UpperCAmelCase__ : Any = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"]
UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 298
| 0
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[int] , snake_case__ : Dict , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : int = 3_2 , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_5_5 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , snake_case__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , snake_case__ : bool = True , snake_case__ : Tuple=7 , snake_case__ : List[str]=3_0 , snake_case__ : Optional[int]=4_0_0 , snake_case__ : Optional[Any]=3 , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_8_8}
UpperCAmelCase__ : Tuple = size_divisor
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : List[str] = rescale_factor
UpperCAmelCase__ : List[str] = do_normalize
UpperCAmelCase__ : Optional[int] = do_center_crop
UpperCAmelCase__ : List[Any] = image_mean
UpperCAmelCase__ : Dict = image_std
UpperCAmelCase__ : Optional[Any] = do_pad
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : Optional[int] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
def __a ( self : Optional[int] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str=False ):
'''simple docstring'''
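        # Mirror the processor's resize math: scale the short edge to `size`, cap the long edge at 1333/800 * size, then round down to a multiple of size_divisor.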
if not batched:
UpperCAmelCase__ : Tuple = self.size["shortest_edge"]
UpperCAmelCase__ : List[str] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
UpperCAmelCase__ : Dict = image.size
else:
UpperCAmelCase__ : Optional[Any] = image.shape[1], image.shape[2]
UpperCAmelCase__ : Optional[Any] = size / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ : Union[str, Any] = size, scale * w
else:
UpperCAmelCase__ : Optional[int] = scale * h, size
UpperCAmelCase__ : Optional[Any] = int((1_3_3_3 / 8_0_0) * size )
if max(snake_case__ , snake_case__ ) > max_size:
UpperCAmelCase__ : int = max_size / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = newh * scale
UpperCAmelCase__ : Union[str, Any] = neww * scale
UpperCAmelCase__ : Union[str, Any] = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase__ : List[str] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase__ : Union[str, Any] = []
for image in image_inputs:
UpperCAmelCase__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            UpperCAmelCase__ : Union[str, Any] = max(snake_case__ , key=lambda item : item[0] )[0]
            UpperCAmelCase__ : Tuple = max(snake_case__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =BridgeTowerImageProcessor if is_vision_available() else None
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BridgeTowerImageProcessingTester(self )
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "size_divisor" ) )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Any = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : int = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 369
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
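        # resize each image so its shorter edge matches a target sampled from [short_edge_length[0], short_edge_length[1]], keeping the longer edge under max_size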
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
                UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        UpperCAmelCase__ : List[str] = lambda x : (x - self.pixel_mean) / self.pixel_std
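    # pad() bottom/right-pads each CHW tensor to the batch-wide max shape and also returns the original sizes.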
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
            UpperCAmelCase__ : Optional[int] = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
            # padding to a fixed size divisor is not implemented
if self.size_divisibility > 0:
raise NotImplementedError()
            # per-image (y, x) scale factors between the original and padded sizes
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
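    # rescale boxes back to the original resolution: x coordinates by the x scale, y coordinates by the y scale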
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
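    # clamp the (x1, y1, x2, y2) coordinates in place so every box lies inside the (height, width) image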
tensor[:, 0].clamp_(min=0 , max=snake_case )
tensor[:, 1].clamp_(min=0 , max=snake_case )
tensor[:, 2].clamp_(min=0 , max=snake_case )
tensor[:, 3].clamp_(min=0 , max=snake_case )
| 298
| 0
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCAmelCase : int = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
_lowerCAmelCase : Optional[Any] = dataset.iloc[:, 1:2].values
_lowerCAmelCase : Optional[Any] = dataset.iloc[:, 2].values
_lowerCAmelCase : int = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCAmelCase : str = PolynomialFeatures(degree=4)
_lowerCAmelCase : int = poly_reg.fit_transform(X)
_lowerCAmelCase : Any = LinearRegression()
pol_reg.fit(X_poly, y)
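# pol_reg is a linear model over degree-4 polynomial features, i.e. a degree-4 polynomial fit of salary vs. position level.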
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
plt.scatter(snake_case , snake_case , color="red" )
plt.plot(snake_case , pol_reg.predict(poly_reg.fit_transform(snake_case ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 370
|
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE__ ( bita : int , bitb : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
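    # Half adder on 4 qubits: qubits 0 and 1 hold the inputs, qubit 2 receives the sum (XOR), qubit 3 the carry (AND).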
UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" )
UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
    if bitb == 1:
qc_ha.x(1 )
qc_ha.barrier()
    # use CNOTs to write the XOR of the inputs on qubit 2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
    # use a CCX (Toffoli) gate to write the AND of the inputs on qubit 3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    UpperCAmelCase__ : Optional[int] = qiskit.execute(qc_ha , simulator , shots=1000 )
# Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
UpperCAmelCase__
) : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
SCREAMING_SNAKE_CASE_ =False
    # Ignoring some failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
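        # autoregressive rollout: pad actions/rewards with zeros for the new step, predict, then append the new state and the decremented return-to-go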
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 371
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
| 298
| 0
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( snake_case )-> bool:
'''simple docstring'''
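    # Miller-Rabin: write num - 1 as s * 2**t with s odd, then test five random bases.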
UpperCAmelCase__ : Tuple = num - 1
UpperCAmelCase__ : Any = 0
while s % 2 == 0:
UpperCAmelCase__ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase__ : str = random.randrange(2 , num - 1 )
UpperCAmelCase__ : Union[str, Any] = pow(snake_case , snake_case , snake_case )
if v != 1:
UpperCAmelCase__ : int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase__ : Optional[int] = i + 1
UpperCAmelCase__ : List[str] = (v**2) % num
return True
def SCREAMING_SNAKE_CASE__ ( snake_case )-> bool:
'''simple docstring'''
if num < 2:
return False
UpperCAmelCase__ : List[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case )
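# Sample random keysize-bit candidates until one passes the quick divisibility checks and Miller-Rabin.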
def SCREAMING_SNAKE_CASE__ ( snake_case = 1024 )-> int:
'''simple docstring'''
while True:
UpperCAmelCase__ : str = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(snake_case ):
return num
if __name__ == "__main__":
_lowerCAmelCase : Dict = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 350
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : str = args.log_outputs
UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
UpperCAmelCase__ : List[str] = load_metric("wer" )
UpperCAmelCase__ : Tuple = load_metric("cer" )
# compute metrics
UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] )
UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}'
print(snake_case )
with open(f'{dataset_id}_eval_results.txt' , "w" ) as f:
f.write(snake_case )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt'
UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt'
with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case : List[Any] , snake_case : List[str] ):
p.write(f'{i}' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(f'{i}' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case , with_indices=snake_case )
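# Lowercase, strip the characters ignored during training, and collapse whitespace so WER/CER match the training normalization.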
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str:
'''simple docstring'''
UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) )
return text
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case )
    # for testing: only process the first 10 examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ : str = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case : Any ):
UpperCAmelCase__ : List[str] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ : List[Any] = prediction["text"]
UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case , snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
| 298
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(snake_case__ ):
UpperCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def __a ( self : Tuple ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(snake_case__ ):
UpperCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def __a ( self : Dict ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(snake_case__ )
UpperCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(snake_case__ )
UpperCAmelCase__ : Dict = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ : str ):
return model(**snake_case__ )
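            # block_until_ready() waits for JAX's asynchronous dispatch, so compilation errors surface inside the test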
eval(**snake_case__ ).block_until_ready()
@slow
def __a ( self : Tuple ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ )
UpperCAmelCase__ : List[str] = FlaxRobertaModel.from_pretrained(snake_case__ )
UpperCAmelCase__ : List[Any] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ : Any ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
def __a ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase__ : List[Any] = FlaxAutoModel.from_pretrained("bert-base" )
def __a ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase__ : Tuple = FlaxAutoModel.from_pretrained(snake_case__ , revision="aaaaaa" )
def __a ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
UpperCAmelCase__ : Dict = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __a ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(snake_case__ , "Use `from_pt=True` to load this model" ):
UpperCAmelCase__ : Tuple = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 351
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def setUp( self : Dict ):
'''simple docstring'''
self.model_tester = BeitModelTester(self )
self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
def test_config( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def test_inputs_embeds( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def test_multi_gpu_data_parallel_forward( self : List[str] ):
'''simple docstring'''
pass
def test_model_common_attributes( self : Tuple ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self : str ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self : str ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_image_classification( self : str ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_for_semantic_segmentation( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def test_training( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_training_gradient_checkpointing( self : Optional[int] ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config )
model.gradient_checkpointing_enable()
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_initialization( self : str ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def test_model_from_pretrained( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BeitModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( )-> Optional[Any]:
'''simple docstring'''
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def default_image_processor( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def test_inference_masked_image_modeling_head( self : Optional[int] ):
'''simple docstring'''
model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
# prepare bool_masked_pos
bool_masked_pos = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
@slow
def test_inference_image_classification_head( self : Optional[Any] ):
'''simple docstring'''
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
expected_class_idx = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def test_inference_image_classification_head_imagenet_22k( self : Optional[int] ):
'''simple docstring'''
model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([1.6881, -0.2787, 0.5901] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
expected_class_idx = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def test_inference_semantic_segmentation( self : Union[str, Any] ):
'''simple docstring'''
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=6_4_0 , do_center_crop=False )
ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
image = Image.open(ds[0]["file"] )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , expected_shape )
is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
expected_slice = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=torch_device , )
else:
expected_slice = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=torch_device , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_post_processing_semantic_segmentation( self : Any ):
'''simple docstring'''
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=6_4_0 , do_center_crop=False )
ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
image = Image.open(ds[0]["file"] )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(5_0_0, 3_0_0)] )
expected_shape = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , expected_shape )
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
expected_shape = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , expected_shape )
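# Added usage sketch (illustration, not part of the test suite): downstream code
# typically converts the post-processed output above into a numpy label mask,
# e.g. `mask = segmentation[0].numpy().astype("uint8")`, giving an HxW array of
# ADE20K class ids per pixel.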
| 298
| 0
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = """config.json"""
WEIGHTS_NAME = """diffusion_pytorch_model.bin"""
FLAX_WEIGHTS_NAME = """diffusion_flax_model.msgpack"""
ONNX_WEIGHTS_NAME = """model.onnx"""
SAFETENSORS_WEIGHTS_NAME = """diffusion_pytorch_model.safetensors"""
ONNX_EXTERNAL_WEIGHTS_NAME = """weights.pb"""
HUGGINGFACE_CO_RESOLVE_ENDPOINT = """https://huggingface.co"""
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = """diffusers_modules"""
HF_MODULES_CACHE = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
DEPRECATED_REVISION_ARGS = ["""fp16""", """non-ema"""]
TEXT_ENCODER_ATTN_MODULE = """.self_attn"""
| 352
|
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( worda : str , wordb : str )-> int:
'''simple docstring'''
len_worda = len(worda )
len_wordb = len(wordb )
@functools.cache
def min_distance(indexa : int , indexb : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_wordb - indexb
# if second word index is overflow - delete all from the first word
if indexb >= len_wordb:
return len_worda - indexa
diff = int(worda[indexa] != wordb[indexb] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
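# Added sanity check (illustration, not in the original module): the classic
# pairs "kitten" -> "sitting" and "intention" -> "execution" have edit
# distances 3 and 5 respectively.
assert SCREAMING_SNAKE_CASE__("kitten" , "sitting" ) == 3
assert SCREAMING_SNAKE_CASE__("intention" , "execution" ) == 5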
| 298
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_electra_fast"] = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
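# Added sketch (illustration, not the real _LazyModule): the same lazy-import
# idea can be expressed with module-level __getattr__ (PEP 562), resolving a
# name to its submodule only on first access:
#
#   import importlib
#   _SUBMODULES = {"ElectraTokenizer": "tokenization_electra"}
#
#   def __getattr__(name):
#       if name in _SUBMODULES:
#           module = importlib.import_module("." + _SUBMODULES[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")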
| 353
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def _no_encoding_on_file_open( self : List[Any] , file_path : str ):
'''simple docstring'''
with open(file_path , encoding="utf-8" ) as input_file:
regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
input_text = input_file.read()
match = regexp.search(input_text )
return match
def _no_print_statements( self : List[str] , file_path : str ):
'''simple docstring'''
with open(file_path , encoding="utf-8" ) as input_file:
regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
input_text = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
matches = regexp.finditer(input_text )
matches = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def test_no_encoding_on_file_open( self : Any ):
'''simple docstring'''
dataset_paths = Path("./datasets" )
dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(dataset ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def test_no_print_statements( self : Dict ):
'''simple docstring'''
dataset_paths = Path("./datasets" )
dataset_files = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(dataset ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
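# Added illustration (reading of the patterns above, not part of the test):
# the first regex flags a bare `open(path)` but skips `open(path, encoding="utf-8")`
# and binary/write modes such as `open(path, "rb")`; the second flags real
# `print(` calls while ignoring ones inside comments and triple-quoted strings.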
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , )-> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
closed = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the reference grid
closed[init[0]][init[1]] = 1
action = [
[0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
] # the action grid
x = init[0]
y = init[1]
g = 0
f = g + heuristic[x][y] # cost from starting cell to destination cell
cell = [[f, g, x, y]]
found = False # flag that is set when search is complete
resign = False # flag set if we can't find expand
while not found and not resign:
if len(cell ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
next_cell = cell.pop()
x = next_cell[2]
y = next_cell[3]
g = next_cell[1]
if x == goal[0] and y == goal[1]:
found = True
else:
for i in range(len(DIRECTIONS ) ): # to try out different valid actions
xa = x + DIRECTIONS[i][0]
ya = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
ga = g + cost
fa = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
closed[xa][ya] = 1
action[xa][ya] = i
invpath = []
x = goal[0]
y = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
xa = x - DIRECTIONS[action[x][y]][0]
ya = y - DIRECTIONS[action[x][y]][1]
x = xa
y = ya
invpath.append([x, y] )
path = []
for i in range(len(invpath ) ):
path.append(invpath[len(invpath ) - 1 - i] )
return path, action
if __name__ == "__main__":
grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
init = [0, 0]
# all coordinates are given in format [y,x]
goal = [len(grid) - 1, len(grid[0]) - 1]
cost = 1
# the cost map which pushes the path closer to the goal
heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
heuristic[i][j] = 99
path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
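# Added illustration (not in the original script): with an all-zero heuristic
# the same search degenerates to uniform-cost (Dijkstra) search and still
# returns a shortest path on this grid.
zero_heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
dijkstra_path, _ = search(grid, init, goal, cost, zero_heuristic)
print("""PATH WITH ZERO HEURISTIC (DIJKSTRA)""")
for i in range(len(dijkstra_path)):
print(dijkstra_path[i])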
| 354
|
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points; it is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def _info( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _compute( self : Union[str, Any] , X : List[str] , reference_distribution : Any ):
'''simple docstring'''
# convert to numpy arrays
X = np.array(X )
reference_distribution = np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
X_minus_mu = X - np.mean(reference_distribution )
cov = np.cov(reference_distribution.T )
try:
inv_covmat = np.linalg.inv(cov )
except np.linalg.LinAlgError:
inv_covmat = np.linalg.pinv(cov )
left_term = np.dot(X_minus_mu , inv_covmat )
mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
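# Added hand check (illustration, not part of the metric): reproducing the
# docstring example with plain numpy. For reference points [[0, 1], [1, 0]]
# the covariance matrix is singular, so the pseudo-inverse branch above runs.
#   ref = np.array([[0, 1], [1, 0]]); X = np.array([[0, 1]])
#   X_minus_mu = X - np.mean(ref)                      # [[-0.5, 0.5]]
#   inv_cov = np.linalg.pinv(np.cov(ref.T))            # [[0.5, -0.5], [-0.5, 0.5]]
#   np.dot(np.dot(X_minus_mu, inv_cov), X_minus_mu.T).diagonal()  # -> array([0.5])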
| 298
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
pipeline_class = ShapEImgaImgPipeline
params = ['''image''']
batch_params = ['''image''']
required_optional_params = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE_ =False
@property
def text_embedder_hidden_size( self : Any ):
'''simple docstring'''
return 3_2
@property
def time_input_dim( self : Optional[int] ):
'''simple docstring'''
return 3_2
@property
def time_embed_dim( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def renderer_dim( self : int ):
'''simple docstring'''
return 8
@property
def dummy_image_encoder( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
model = CLIPVisionModel(config )
return model
@property
def dummy_image_processor( self : str ):
'''simple docstring'''
image_processor = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
@property
def dummy_prior( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 1_6,
"embedding_dim": self.time_input_dim,
"num_embeddings": 3_2,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
model = PriorTransformer(**model_kwargs )
return model
@property
def dummy_renderer( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
"param_shapes": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 1_2,
"background": (
0.1,
0.1,
0.1,
),
}
model = ShapERenderer(**model_kwargs )
return model
def get_dummy_components( self : Tuple ):
'''simple docstring'''
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
image_processor = self.dummy_image_processor
renderer = self.dummy_renderer
scheduler = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1_0_2_4 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
components = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def get_dummy_inputs( self : Optional[int] , device : Tuple , seed : List[str]=0 ):
'''simple docstring'''
input_image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 3_2,
"output_type": "np",
}
return inputs
def __a ( self : Any ):
'''simple docstring'''
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images[0]
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : List[str] ):
'''simple docstring'''
test_max_difference = torch_device == "cpu"
relax_max_difference = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def __a ( self : Any ):
'''simple docstring'''
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int ):
'''simple docstring'''
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=torch_device ).manual_seed(0 )
images = pipe(
input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="np" , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(images , expected_image )
| 355
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
def get_dummy_components( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def get_dummy_inputs( self : Any , device : Dict , seed : Optional[Any]=0 ):
'''simple docstring'''
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
# if
UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
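# Added note (illustration): resetting the peak-memory counters before each
# stage is what lets the `torch.cuda.max_memory_allocated()` assertions above
# measure one pipeline stage at a time instead of the whole test run.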
| 298
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_convnext"] = ["""ConvNextFeatureExtractor"""]
_import_structure["image_processing_convnext"] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_convnext"] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_convnext"] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 356
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs( word : List[Any] )-> set:
'''simple docstring'''
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
pairs = set(pairs )
return pairs
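# Added example (illustration): get_pairs("hello") returns the symbol bigrams
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which the BPE merge loop
# in `bpe` below repeatedly collapses according to self.bpe_ranks.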
class lowerCAmelCase__ ( __magic_name__ ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , vocab_file : int , merges_file : Union[str, Any] , bos_token : Tuple="<s>" , eos_token : List[Any]="</s>" , sep_token : Union[str, Any]="</s>" , cls_token : Union[str, Any]="<s>" , unk_token : Any="<unk>" , pad_token : int="<pad>" , mask_token : List[str]="<mask>" , **kwargs : Optional[int] , ):
'''simple docstring'''
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
self.vocab_file = vocab_file
self.merges_file = merges_file
self.encoder = {}
self.encoder[self.bos_token] = 0
self.encoder[self.pad_token] = 1
self.encoder[self.eos_token] = 2
self.encoder[self.unk_token] = 3
self.add_from_file(vocab_file )
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file , encoding="utf-8" ) as merges_handle:
merges = merges_handle.read().split("\n" )[:-1]
merges = [tuple(merge.split()[:-1] ) for merge in merges]
self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
self.cache = {}
def build_inputs_with_special_tokens( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_b is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_b + sep
def get_special_tokens_mask( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
if token_ids_b is None:
return [1] + ([0] * len(token_ids_a )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def create_token_type_ids_from_sequences( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
def vocab_size( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def get_vocab( self : Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self : Dict , token : Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
word = tuple(token )
word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = "@@ ".join(word )
word = word[:-4]
self.cache[token] = word
return word
def _tokenize( self : List[Any] , text : Tuple ):
'''simple docstring'''
split_tokens = []
words = re.findall(R"\S+\n?" , text )
for token in words:
split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
return split_tokens
def _convert_token_to_id( self : Dict , token : List[str] ):
'''simple docstring'''
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self : List[Any] , index : Any ):
'''simple docstring'''
return self.decoder.get(index , self.unk_token )
def convert_tokens_to_string( self : str , tokens : Tuple ):
'''simple docstring'''
out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
return out_string
def save_vocabulary( self : Any , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
out_merge_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
copyfile(self.merges_file , out_merge_file )
return out_vocab_file, out_merge_file
def add_from_file( self : List[Any] , f : Union[str, Any] ):
'''simple docstring'''
if isinstance(f , str ):
try:
with open(f , "r" , encoding="utf-8" ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
word = line[:idx]
self.encoder[word] = len(self.encoder )
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , )-> float:
'''simple docstring'''
xa = x_start
fxa = fnc(xa )
area = 0.0
for _ in range(steps ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
xa_next = (x_end - x_start) / steps + xa
fxa_next = fnc(xa_next )
area += abs(fxa + fxa_next ) * (xa_next - xa) / 2
# Increment step
xa = xa_next
fxa = fxa_next
return area
if __name__ == "__main__":
def f( x : float )-> float:
'''simple docstring'''
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
i = 10
while i <= 100_000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
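# Added sanity note (illustration): the exact value is 250/3 (about 83.3333),
# since the odd x^3 term integrates to zero over the symmetric interval, so
# the printed estimates should converge to it.
print(F"""exact value: {250 / 3}""")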
| 357
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
common : CommonSchedulerState
# setable values
init_noise_sigma : jnp.ndarray
timesteps : jnp.ndarray
num_inference_steps : Optional[int] = None
@classmethod
def create( cls : Optional[int] , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
'''simple docstring'''
return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
state : DDPMSchedulerState
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
_compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
dtype : jnp.dtype
@property
def has_state( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
self.dtype = dtype
def create_state( self : Any , common : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
common = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
def scale_model_input( self : int , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
'''simple docstring'''
return sample
def set_timesteps( self : Dict , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
'''simple docstring'''
step_ratio = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=num_inference_steps , timesteps=timesteps , )
def _get_variance( self : List[str] , state : DDPMSchedulerState , t : int , predicted_variance : Any=None , variance_type : Union[str, Any]=None ):
'''simple docstring'''
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = jnp.clip(variance , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
elif variance_type == "fixed_large":
variance = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = state.common.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
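# Illustrative usage sketch (added; not part of the original file). Upstream this
# class is `FlaxDDPMScheduler`, and the obfuscated `__a` methods above correspond
# to `create_state`, `scale_model_input`, `set_timesteps`, `_get_variance`,
# `step`, `add_noise` and `get_velocity` - those names are assumptions here.
# Under that assumption, a denoising loop looks roughly like:
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     noise_pred = unet_apply(sample, t)  # hypothetical model call
#     out = scheduler.step(state, noise_pred, t, sample, key=rng_key)
#     sample, state = out.prev_sample, out.state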
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_lowerCAmelCase : List[str] = logging.getLogger(__name__)
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
_lowerCAmelCase : str = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
_lowerCAmelCase : Dict = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
_lowerCAmelCase : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
_lowerCAmelCase : Tuple = [0] * args.vocab_size
for k, v in counter.items():
_lowerCAmelCase : List[Any] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
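# Illustrative follow-up (added; not part of the original script). In the
# distillation training script these counts are typically turned into smoothed
# masking probabilities with an XLM/word2vec-style exponent; a minimal sketch,
# assuming `counts` as dumped above and a smoothing factor of 0.7:
#
# import numpy as np
# token_probs = np.maximum(counts, 1) ** -0.7   # rarer tokens get relatively more mass
# token_probs = token_probs / token_probs.sum() # normalize into a sampling distribution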
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
    SCREAMING_SNAKE_CASE_ =False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
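# Note (added): the rollout above follows the standard Decision Transformer
# evaluation recipe - each step appends zero placeholders for the next action and
# reward, queries the model for the action, then decrements the return-to-go by
# the observed reward (R_{t+1} = R_t - r_t) so the model keeps conditioning on
# the remaining desired return.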
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
_lowerCAmelCase : str = {
"""camembert-base""": 512,
}
_lowerCAmelCase : Union[str, Any] = """▁"""
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask''']
def __init__( self : str , snake_case__ : str , snake_case__ : Optional[Any]="<s>" , snake_case__ : int="</s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : int=["<s>NOTUSED", "</s>NOTUSED"] , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
UpperCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
UpperCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
UpperCAmelCase__ : List[str] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
UpperCAmelCase__ : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCAmelCase__ : Union[str, Any] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase__ : Optional[int] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCAmelCase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : int ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : Optional[Any] , snake_case__ : str ):
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def __a ( self : Optional[int] , snake_case__ : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(snake_case__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(snake_case__ )
def __a ( self : Optional[int] , snake_case__ : Optional[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self : Dict , snake_case__ : str ):
'''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase__ : int = None
return state
def __setstate__( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
UpperCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
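# Illustrative usage sketch (added; not part of the original file). The class
# above is an obfuscated copy of `CamembertTokenizer`; assuming that public name,
# a typical round-trip looks like:
#
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# ids = tokenizer("J'aime le camembert !")["input_ids"]  # wrapped in <s> ... </s>
# text = tokenizer.decode(ids, skip_special_tokens=True)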
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
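# Note (added): this is the standard lazy-import pattern - under TYPE_CHECKING
# the real symbol is imported so static analyzers can resolve `MLukeTokenizer`,
# while at runtime the module is replaced by a `_LazyModule` that defers the
# optional, sentencepiece-dependent import until the attribute is first accessed.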
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''blenderbot-small'''
SCREAMING_SNAKE_CASE_ =['''past_key_values''']
SCREAMING_SNAKE_CASE_ ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , snake_case__ : str=5_0_2_6_5 , snake_case__ : int=5_1_2 , snake_case__ : Optional[int]=8 , snake_case__ : int=2_0_4_8 , snake_case__ : List[str]=1_6 , snake_case__ : Optional[int]=8 , snake_case__ : Optional[int]=2_0_4_8 , snake_case__ : int=1_6 , snake_case__ : Tuple=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]="gelu" , snake_case__ : Any=5_1_2 , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.0 , snake_case__ : Dict=0.0 , snake_case__ : List[str]=0.02 , snake_case__ : Optional[int]=1 , snake_case__ : Union[str, Any]=False , snake_case__ : Optional[Any]=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : str=2 , snake_case__ : List[str]=2 , **snake_case__ : int , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = d_model
UpperCAmelCase__ : Any = encoder_ffn_dim
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Dict = decoder_layers
UpperCAmelCase__ : Any = decoder_attention_heads
UpperCAmelCase__ : Optional[int] = dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : List[Any] = activation_function
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : Any = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : Any = use_cache
UpperCAmelCase__ : List[Any] = encoder_layers
UpperCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
class lowerCAmelCase__ ( __magic_name__ ):
@property
def __a ( self : Dict ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase__ : Any = {0: "batch"}
UpperCAmelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase__ : str = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase__ : Dict = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : List[str] = super().outputs
else:
UpperCAmelCase__ : Tuple = super(snake_case__ , self ).outputs
if self.use_past:
UpperCAmelCase__ : List[Any] = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase__ : List[str] = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __a ( self : str , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Generate decoder inputs
UpperCAmelCase__ : Dict = seq_length if not self.use_past else 1
UpperCAmelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[int] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ : str = dict(**snake_case__ , **snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ : Tuple = common_inputs["input_ids"].shape
UpperCAmelCase__ : Dict = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase__ : Optional[int] = self.num_attention_heads
UpperCAmelCase__ : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : int = decoder_seq_length + 3
UpperCAmelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ : Union[str, Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(snake_case__ , snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ : Optional[int] = self.num_layers
UpperCAmelCase__ : Union[str, Any] = min(snake_case__ , snake_case__ )
UpperCAmelCase__ : int = max(snake_case__ , snake_case__ ) - min_num_layers
UpperCAmelCase__ : Tuple = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(snake_case__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
) )
# TODO: test this.
UpperCAmelCase__ : str = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(snake_case__ , snake_case__ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) )
return common_inputs
def __a ( self : List[Any] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : List[str] = seqlen + 2
UpperCAmelCase__ : List[Any] = self.num_layers
UpperCAmelCase__ : Union[str, Any] = self.num_attention_heads
UpperCAmelCase__ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : str = common_inputs["attention_mask"].dtype
UpperCAmelCase__ : Any = torch.cat(
[common_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
UpperCAmelCase__ : List[str] = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(snake_case__ )
]
return common_inputs
def __a ( self : Optional[int] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ : List[Any] = tokenizer.num_special_tokens_to_add(snake_case__ )
UpperCAmelCase__ : Any = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ : List[str] = dict(tokenizer(snake_case__ , return_tensors=snake_case__ ) )
return common_inputs
def __a ( self : Optional[int] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
elif self.task == "causal-lm":
UpperCAmelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
else:
UpperCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
return common_inputs
def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : List[str] = super()._flatten_past_key_values_(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCAmelCase__ : List[str] = super(snake_case__ , self )._flatten_past_key_values_(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
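# Illustrative usage sketch (added; not part of the original file). Assuming the
# classes above are `BlenderbotSmallConfig` and `BlenderbotSmallOnnxConfig`,
# dummy inputs for an ONNX export are typically produced like:
#
# config = BlenderbotSmallConfig.from_pretrained("facebook/blenderbot_small-90M")
# onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
# dummy_inputs = onnx_config.generate_dummy_inputs(
#     tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
# )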
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase : Optional[int] = get_logger(__name__)
_lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md"""
_lowerCAmelCase : Dict = uuida().hex
_lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
return ua
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]:
'''simple docstring'''
if token is None:
UpperCAmelCase__ : Optional[Any] = HfFolder.get_token()
if organization is None:
UpperCAmelCase__ : Tuple = whoami(snake_case )["name"]
return f'{username}/{model_id}'
else:
return f'{organization}/{model_id}'
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None
UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case )
UpperCAmelCase__ : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(snake_case , "adam_beta1" ) else None , adam_beta2=args.adam_beta2 if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" )
model_card.save(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() )
UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case )
if search is None:
return None
UpperCAmelCase__ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None
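# Example (added, illustrative): for a resolved file such as
# ".../models--some-org--some-model/snapshots/<commit sha>/config.json" the regex
# above captures the "<commit sha>" snapshot folder name and returns it only if
# it matches REGEX_COMMIT_HASH.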
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase : Dict = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
_lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""")
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None:
'''simple docstring'''
if new_cache_dir is None:
UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCAmelCase__ : str = old_diffusers_cache
UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser()
UpperCAmelCase__ : Any = Path(snake_case ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case )
new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
os.replace(snake_case , snake_case )
try:
os.symlink(snake_case , snake_case )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
_lowerCAmelCase : Any = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase : List[str] = int(f.read())
except ValueError:
_lowerCAmelCase : Optional[int] = 0
if cache_version < 1:
_lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str:
'''simple docstring'''
if variant is not None:
UpperCAmelCase__ : int = weights_name.split("." )
UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
UpperCAmelCase__ : Optional[int] = ".".join(snake_case )
return weights_name
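# Example (added, illustrative): _add_variant("diffusion_pytorch_model.bin", "fp16")
# splices the variant in before the extension and returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is returned unchanged.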
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *,
snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[str] = str(snake_case )
if os.path.isfile(snake_case ):
return pretrained_model_name_or_path
elif os.path.isdir(snake_case ):
if os.path.isfile(os.path.join(snake_case , snake_case ) ):
# Load from a PyTorch checkpoint
UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(snake_case , snake_case , snake_case ) ):
UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" )
):
try:
UpperCAmelCase__ : List[Any] = hf_hub_download(
snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , )
try:
# 2. Load model file as usual
UpperCAmelCase__ : Dict = hf_hub_download(
snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_lowerCAmelCase : int = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Optional[Any] )-> Tuple:
'''simple docstring'''
if os.path.exists(snake_case ):
if os.path.exists(os.path.join(snake_case , "config.json" ) ) and os.path.isfile(
os.path.join(snake_case , "config.json" ) ):
os.remove(os.path.join(snake_case , "config.json" ) )
if os.path.exists(os.path.join(snake_case , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(snake_case , "pytorch_model.bin" ) ):
os.remove(os.path.join(snake_case , "pytorch_model.bin" ) )
else:
os.makedirs(snake_case )
model.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Optional[Any]=False )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Any = 2
if unlogit:
UpperCAmelCase__ : Tuple = torch.pow(snake_case , snake_case )
UpperCAmelCase__ : List[Any] = p * torch.log(snake_case )
UpperCAmelCase__ : List[Any] = 0
return -plogp.sum(dim=-1 )
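# Note (added): the function above computes the Shannon entropy -sum(p * log p)
# along the last axis, treating 0 * log(0) as 0; with `unlogit` set, the input is
# first squared (torch.pow(p, 2)) before the entropy is taken. It is applied to
# attention maps in compute_heads_importance below.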
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> int:
'''simple docstring'''
logger.info("lv, h >\t" + "\t".join(f'{x + 1}' for x in range(len(snake_case ) ) ) )
for row in range(len(snake_case ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + "\t".join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + "\t".join(f'{x:d}' for x in tensor[row].cpu().data ) )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : List[Any] , snake_case : str , snake_case : Dict=True , snake_case : str=True , snake_case : Dict=None , snake_case : Tuple=False )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase__ : Dict = torch.zeros(snake_case , snake_case ).to(args.device )
UpperCAmelCase__ : Dict = torch.zeros(snake_case , snake_case ).to(args.device )
if head_mask is None:
UpperCAmelCase__ : List[Any] = torch.ones(snake_case , snake_case ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Dict = 0.0
UpperCAmelCase__ : List[str] = 0.0
for step, inputs in enumerate(tqdm(snake_case , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attentions the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case ):
UpperCAmelCase__ : List[Any] = entropy(attn.detach() , snake_case )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : str = torch.pow(torch.pow(snake_case , snake_case ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
UpperCAmelCase__ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(snake_case )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(snake_case )
logger.info("Head ranked by importance scores" )
UpperCAmelCase__ : Union[str, Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase__ : Dict = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase__ : Any = head_ranks.view_as(snake_case )
print_ad_tensor(snake_case )
return attn_entropy, head_importance, total_loss
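# Note (added): the head importance accumulated above is a gradient-sensitivity
# score, |d(loss)/d(head_mask)| summed over the evaluation data, in the spirit of
# Michel et al., "Are Sixteen Heads Really Better than One?" (2019); it is then
# normalized by token count, optionally L2-normalized per layer, and min-max
# scaled globally, as controlled by the two `dont_normalize_*` flags.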
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : List[Any] )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = compute_heads_importance(snake_case , snake_case , snake_case , compute_entropy=snake_case )
    UpperCAmelCase__ : Union[str, Any] = 1 / loss  # instead of a downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , snake_case , original_score * args.masking_threshold )
UpperCAmelCase__ : List[str] = torch.ones_like(snake_case )
UpperCAmelCase__ : List[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase__ : int = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase__ : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase__ : Optional[Any] = float("Inf" )
UpperCAmelCase__ : Dict = head_importance.view(-1 ).sort()[1]
if len(snake_case ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase__ : Any = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase__ : int = new_head_mask.view(-1 )
UpperCAmelCase__ : int = 0.0
UpperCAmelCase__ : List[Any] = new_head_mask.view_as(snake_case )
UpperCAmelCase__ : List[Any] = new_head_mask.clone().detach()
print_ad_tensor(snake_case )
# Compute metric and head importance again
UpperCAmelCase__ : Union[str, Any] = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , head_mask=snake_case )
UpperCAmelCase__ : Any = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , snake_case , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(snake_case )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
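# Note (added): the loop above is a greedy iterative search - heads are sorted by
# ascending importance, masked num_to_mask at a time, and masking stops once the
# score (1 / LM loss) drops below masking_threshold * original score; the mask
# saved at the top of the loop (the last one that still met the threshold) is
# what is returned and persisted to head_mask.npy.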
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = datetime.now()
UpperCAmelCase__ : Optional[int] = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case )
UpperCAmelCase__ : List[Any] = 1 / loss
UpperCAmelCase__ : List[Any] = datetime.now() - before_time
UpperCAmelCase__ : Optional[Any] = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : Any = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case , snake_case ):
UpperCAmelCase__ : int = [
v,
]
assert sum(len(snake_case ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case )
UpperCAmelCase__ : Any = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : Dict = datetime.now()
UpperCAmelCase__ : str = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case , actually_pruned=snake_case , )
UpperCAmelCase__ : str = 1 / loss
UpperCAmelCase__ : Any = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , snake_case , snake_case , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , snake_case , snake_case )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(snake_case , args.output_dir )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=snake_case , type=snake_case , required=snake_case , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=snake_case , type=snake_case , required=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=snake_case , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=snake_case , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=snake_case , type=snake_case , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=snake_case , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=snake_case , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=snake_case , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=snake_case , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=snake_case , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=snake_case , help="Batch size." )
parser.add_argument("--seed" , type=snake_case , default=42 )
parser.add_argument("--local_rank" , type=snake_case , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=snake_case , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=snake_case , default="" , help="Can be used for distant debugging." )
UpperCAmelCase__ : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase__ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase__ : Dict = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase__ : List[Any] = torch.device("cuda" , args.local_rank )
UpperCAmelCase__ : Optional[int] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase__ : Union[str, Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase__ : str = nn.parallel.DistributedDataParallel(
snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case )
elif args.n_gpu > 1:
UpperCAmelCase__ : int = nn.DataParallel(snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case )
torch.save(snake_case , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , snake_case )
# Prepare dataset
UpperCAmelCase__ : Any = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase__ : Any = (torch.from_numpy(snake_case ),)
UpperCAmelCase__ : Any = TensorDataset(*snake_case )
UpperCAmelCase__ : Dict = RandomSampler(snake_case )
UpperCAmelCase__ : List[str] = DataLoader(snake_case , sampler=snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case , snake_case , snake_case )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase__ : int = mask_heads(snake_case , snake_case , snake_case )
prune_heads(snake_case , snake_case , snake_case , snake_case )
if __name__ == "__main__":
main()
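# --- Illustrative sketch (added, not part of the script above): the head-masking stage
# repeatedly zeroes the currently least-important heads until the metric falls below
# masking_threshold * original_score. A minimal standalone version of that control flow,
# where `score_fn` is a hypothetical stand-in for the compute_heads_importance() call:
import torch

def mask_heads_sketch(importance, score_fn, threshold=0.9, amount=0.1):
    head_mask = torch.ones_like(importance)  # (num_layers, num_heads), 1.0 = head active
    original_score = score_fn(head_mask)
    current_score = original_score
    num_to_mask = max(1, int(amount * head_mask.numel()))
    while current_score >= threshold * original_score:
        if head_mask.view(-1).sum() <= num_to_mask:  # keep at least one head active
            break
        scores = importance.clone().view(-1)
        scores[head_mask.view(-1) == 0.0] = float("inf")  # never re-select masked heads
        indices = scores.sort().indices[:num_to_mask]
        head_mask = head_mask.clone()
        head_mask.view(-1)[indices] = 0.0  # zero out the least-important heads
        current_score = score_fn(head_mask)
    return head_mask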
| 361
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss
UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy()
UpperCAmelCase__ : List[Any] = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 298
| 0
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
| 362
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        (
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
            UpperCAmelCase__ ,
        ) : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
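# --- Added sketch: the `ids_tensor` helper imported above is essentially a uniform
# random-id generator. A minimal stand-in (assumption: the real helper also accepts a
# custom rng and name, omitted here):
import torch

def ids_tensor_sketch(shape, vocab_size):
    # random token ids in [0, vocab_size), shaped like the requested dims
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)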
| 298
| 0
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : List[Any] )-> int:
'''simple docstring'''
with open(snake_case ) as metadata_file:
UpperCAmelCase__ : List[Any] = json.load(snake_case )
UpperCAmelCase__ : List[Any] = LukeConfig(use_entity_aware_attention=snake_case , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
UpperCAmelCase__ : Dict = torch.load(snake_case , map_location="cpu" )["module"]
# Load the entity vocab file
UpperCAmelCase__ : Dict = load_original_entity_vocab(snake_case )
# add an entry for [MASK2]
UpperCAmelCase__ : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase__ : Optional[int] = AddedToken("<ent>" , lstrip=snake_case , rstrip=snake_case )
UpperCAmelCase__ : Any = AddedToken("<ent2>" , lstrip=snake_case , rstrip=snake_case )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case )
with open(os.path.join(snake_case , "tokenizer_config.json" ) , "r" ) as f:
UpperCAmelCase__ : Any = json.load(snake_case )
UpperCAmelCase__ : Optional[Any] = "MLukeTokenizer"
with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" ) as f:
json.dump(snake_case , snake_case )
with open(os.path.join(snake_case , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(snake_case , snake_case )
UpperCAmelCase__ : Tuple = MLukeTokenizer.from_pretrained(snake_case )
# Initialize the embeddings of the special tokens
UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(["@"] )[0]
UpperCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(["#"] )[0]
UpperCAmelCase__ : List[Any] = state_dict["embeddings.word_embeddings.weight"]
UpperCAmelCase__ : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
UpperCAmelCase__ : Tuple = word_emb[enta_init_index].unsqueeze(0 )
UpperCAmelCase__ : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCAmelCase__ : List[Any] = state_dict[bias_name]
UpperCAmelCase__ : List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCAmelCase__ : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCAmelCase__ : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase__ : Union[str, Any] = f'encoder.layer.{layer_index}.attention.self.'
UpperCAmelCase__ : List[str] = state_dict[prefix + matrix_name]
UpperCAmelCase__ : Optional[int] = state_dict[prefix + matrix_name]
UpperCAmelCase__ : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase__ : Optional[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
UpperCAmelCase__ : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCAmelCase__ : Dict = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCAmelCase__ : List[Any] = state_dict["entity_predictions.bias"]
UpperCAmelCase__ : Optional[int] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCAmelCase__ : Tuple = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCAmelCase__ : List[str] = LukeForMaskedLM(config=snake_case ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
UpperCAmelCase__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
UpperCAmelCase__ : Optional[Any] = state_dict[key]
else:
UpperCAmelCase__ : Tuple = state_dict[key]
UpperCAmelCase__ : Dict = model.load_state_dict(snake_case , strict=snake_case )
if set(snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCAmelCase__ : List[Any] = MLukeTokenizer.from_pretrained(snake_case , task="entity_classification" )
UpperCAmelCase__ : Any = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
UpperCAmelCase__ : List[Any] = (0, 9)
UpperCAmelCase__ : Optional[int] = tokenizer(snake_case , entity_spans=[span] , return_tensors="pt" )
UpperCAmelCase__ : Dict = model(**snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase__ : Tuple = torch.Size((1, 33, 768) )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase__ : Tuple = torch.Size((1, 1, 768) )
UpperCAmelCase__ : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCAmelCase__ : Any = MLukeTokenizer.from_pretrained(snake_case )
UpperCAmelCase__ : Any = "Tokyo is the capital of <mask>."
UpperCAmelCase__ : str = (24, 30)
UpperCAmelCase__ : Any = tokenizer(snake_case , entity_spans=[span] , return_tensors="pt" )
UpperCAmelCase__ : List[str] = model(**snake_case )
UpperCAmelCase__ : Tuple = encoding["input_ids"][0].tolist()
UpperCAmelCase__ : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
UpperCAmelCase__ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case )
UpperCAmelCase__ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
UpperCAmelCase__ : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(snake_case ) )
model.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ["[MASK]", "[PAD]", "[UNK]"]
    UpperCAmelCase__ : Union[str, Any] = [json.loads(line ) for line in open(snake_case )]
UpperCAmelCase__ : List[str] = {}
for entry in data:
UpperCAmelCase__ : List[Any] = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCAmelCase__ : str = entity_id
break
UpperCAmelCase__ : Optional[Any] = f'{language}:{entity_name}'
UpperCAmelCase__ : Optional[int] = entity_id
return new_mapping
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowerCAmelCase : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
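# --- Added illustration (not part of the conversion script): the script grows embedding
# matrices by concatenating rows copied from existing tokens, so new special tokens start
# from a meaningful initialization. A minimal version of that pattern (names hypothetical):
import torch

def extend_embedding(weight, init_indices):
    # append one new row per index, each copied from an existing row of the matrix
    new_rows = [weight[i].unsqueeze(0) for i in init_indices]
    return torch.cat([weight, *new_rows], dim=0)

# e.g. a (10, 4) embedding matrix extended with two copied rows yields shape (12, 4)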
| 363
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any:
'''simple docstring'''
UpperCAmelCase__ : List[str] = [1]
for i in range(2 , snake_case ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : str = list(range(snake_case ) )
# Find permutation
while factorials:
UpperCAmelCase__ : str = factorials.pop()
UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
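# --- Readability note (added): the routine above encodes k in the factorial number
# system. A de-obfuscated equivalent, assuming the two arguments are k (the permutation
# index) and n (the number of elements):
def kth_permutation(k, n):
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation, elements = [], list(range(n))
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements.pop(number))  # same effect as remove(elements[number])
    permutation.append(elements[0])
    return permutation

# kth_permutation(10, 4) -> [1, 3, 0, 2]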
| 298
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : List[Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : str = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
UpperCAmelCase__ : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
UpperCAmelCase__ : Any = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Any , **snake_case__ : Optional[int] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def __a ( self : str , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = "adapt act apte"
UpperCAmelCase__ : Dict = "adapt act apte"
return input_text, output_text
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase__ : Optional[int] = "adapt act apte"
UpperCAmelCase__ : Optional[int] = ["adapt", "act", "ap@@", "te"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase__ : Optional[int] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
UpperCAmelCase__ : Any = "I am a small frog."
UpperCAmelCase__ : str = tok([src_text] , padding=snake_case__ , truncation=snake_case__ )["input_ids"]
UpperCAmelCase__ : Optional[int] = tok.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
UpperCAmelCase__ : List[str] = "I am a small frog ."
UpperCAmelCase__ : Optional[Any] = "."
UpperCAmelCase__ : Optional[Any] = tok(snake_case__ )["input_ids"]
UpperCAmelCase__ : List[Any] = tok(snake_case__ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
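# --- Added note: in this BPE vocabulary "ap@@" is a subword-continuation piece, so
# "apte" tokenizes to ["ap@@", "te"]; decoding rejoins pieces by dropping the "@@ "
# separator, e.g. " ".join(tokens).replace("@@ ", "") -> "adapt act apte".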
| 364
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase__ : Optional[int] = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
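# --- Added note: the expected flattened-patch width computed throughout these tests is
# patch_height * patch_width * num_channels + 2; the two extra columns store each patch's
# row and column index. For the default 16x16 patches on RGB images: 16 * 16 * 3 + 2 = 770.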
| 298
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int:
'''simple docstring'''
    if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase__ : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Any = 2
while digits < n:
index += 1
        UpperCAmelCase__ : int = len(str(fibonacci(index ) ) )
return index
def SCREAMING_SNAKE_CASE__ ( snake_case : int = 1000 )-> int:
'''simple docstring'''
return fibonacci_digits_index(snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
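# --- Added efficiency note: `fibonacci_digits_index` above recomputes the whole sequence
# for every candidate index. A sketch of the same search that walks the sequence once
# (assumption: same indexing as the script, F(1) = F(2) = 1; matches its result for n >= 2):
def first_fib_index_with_digits(n):
    a, b, index = 1, 1, 2  # F(1), F(2)
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

# first_fib_index_with_digits(3) -> 12, since F(12) = 144 is the first 3-digit term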
| 365
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is True
UpperCAmelCase__ : str = fsspec.filesystem("file" )
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case )
UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(snake_case , snake_case , clobber=snake_case )
with pytest.warns(snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(snake_case ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
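# --- Added usage sketch: the chained-URL test above relies on fsspec's
# "protocol://member::outer_path" URL chaining. A minimal illustration with
# hypothetical paths:
import fsspec

def read_archived_member(archive_path, member, protocol="zip"):
    # open a single member file inside a compressed archive via URL chaining
    with fsspec.open(f"{protocol}://{member}::{archive_path}", "rt", encoding="utf-8") as f:
        return f.read()

# read_archived_member("data.zip", "dataset.jsonl") opens dataset.jsonl inside data.zip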
| 298
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''ctrl'''
SCREAMING_SNAKE_CASE_ =['''past_key_values''']
SCREAMING_SNAKE_CASE_ ={
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Dict , snake_case__ : Optional[int]=2_4_6_5_3_4 , snake_case__ : str=2_5_6 , snake_case__ : Optional[Any]=1_2_8_0 , snake_case__ : List[Any]=8_1_9_2 , snake_case__ : List[str]=4_8 , snake_case__ : Any=1_6 , snake_case__ : Optional[int]=0.1 , snake_case__ : str=0.1 , snake_case__ : Optional[int]=1e-6 , snake_case__ : str=0.02 , snake_case__ : Any=True , **snake_case__ : Tuple , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : Tuple = n_positions
UpperCAmelCase__ : List[Any] = n_embd
UpperCAmelCase__ : List[str] = n_layer
UpperCAmelCase__ : Tuple = n_head
UpperCAmelCase__ : Dict = dff
UpperCAmelCase__ : Optional[Any] = resid_pdrop
UpperCAmelCase__ : int = embd_pdrop
UpperCAmelCase__ : int = layer_norm_epsilon
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : Dict = use_cache
super().__init__(**snake_case__ )
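# --- Added usage sketch (assumption: the standard PretrainedConfig round-trip applies):
#     config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)
#     config.save_pretrained("./ctrl-tiny")
#     config = CTRLConfig.from_pretrained("./ctrl-tiny")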
| 366
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''megatron-bert'''
def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Optional[Any] = position_embedding_type
UpperCAmelCase__ : Any = use_cache
| 298
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase__ ( unittest.TestCase , __magic_name__ ):
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : Dict = load_tool("text-classification" , remote=snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(snake_case__ , "positive" )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(snake_case__ , "positive" )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(snake_case__ , "positive" )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(snake_case__ , "positive" )
| 367
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , )
def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self : Any , snake_case__ : str , snake_case__ : str ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCAmelCase__ ( __magic_name__ ):
@require_beam
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : Dict ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
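# A minimal sketch of preparing one of the Beam builders above outside the test
# harness. "DirectRunner" executes the Apache Beam pipeline locally; the builder
# name follows the one the assertions themselves reference (DummyBeamDataset).
import tempfile

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as cache_dir:
        builder = DummyBeamDataset(cache_dir=cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()
        dataset = builder.as_dataset()
        print(dataset["train"].num_rows)  # 3 rows: "foo", "bar", "foobar"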
| 298
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
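# A stripped-down illustration of the lazy-import pattern used above (a sketch
# of the idea, not the transformers implementation): attribute access triggers
# the real submodule import, so importing the package itself stays cheap.
import importlib
import types


class _ToyLazyModule(types.ModuleType):  # hypothetical name for this sketch
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value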
| 368
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =XLMTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = "lower newer"
UpperCAmelCase__ : Optional[Any] = "lower newer"
return input_text, output_text
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[Any] = "lower"
UpperCAmelCase__ : Any = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"]
UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
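# A tiny worked example of the lookup the tokenizer test above asserts: with the
# toy vocab defined in setUp, "lower" BPE-splits into ["low", "er</w>"], and the
# token-to-id mapping is a plain dict lookup over the fixture's vocab order.
if __name__ == "__main__":
    toy_vocab = {"low": 14, "er</w>": 15, "<unk>": 20}  # subset of the fixture vocab
    tokens = ["low", "er</w>", "<unk>"]
    print([toy_vocab[t] for t in tokens])  # [14, 15, 20]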
| 298
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger()
@dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =field(default_factory=__magic_name__ )
SCREAMING_SNAKE_CASE_ =field(default_factory=__magic_name__ )
def __a ( self : int , snake_case__ : Optional[int] , snake_case__ : Tensor , snake_case__ : Tensor ):
'''simple docstring'''
        UpperCAmelCase__ : int = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Conv2d ) or isinstance(snake_case__ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(snake_case__ )
def __call__( self : Union[str, Any] , snake_case__ : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def __a ( self : Dict ):
'''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =0
SCREAMING_SNAKE_CASE_ =field(default_factory=__magic_name__ )
SCREAMING_SNAKE_CASE_ =field(default_factory=__magic_name__ )
def __call__( self : Union[str, Any] , snake_case__ : Tensor ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Tracker(self.dest )(snake_case__ ).parametrized
UpperCAmelCase__ : List[str] = Tracker(self.src )(snake_case__ ).parametrized
UpperCAmelCase__ : Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
UpperCAmelCase__ : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
f'Numbers of operations are different. Source module has {len(snake_case__ )} operations while'
f' destination module has {len(snake_case__ )}.' )
for dest_m, src_m in zip(snake_case__ , snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : ResNetConfig , snake_case : Path , snake_case : bool = True )-> int:
'''simple docstring'''
print(f'Converting {name}...' )
with torch.no_grad():
UpperCAmelCase__ : Any = timm.create_model(snake_case , pretrained=snake_case ).eval()
UpperCAmelCase__ : Any = ResNetForImageClassification(snake_case ).eval()
UpperCAmelCase__ : Any = ModuleTransfer(src=snake_case , dest=snake_case )
UpperCAmelCase__ : Union[str, Any] = torch.randn((1, 3, 224, 224) )
module_transfer(snake_case )
assert torch.allclose(from_model(snake_case ) , our_model(snake_case ).logits ), "The model logits don't match the original one."
UpperCAmelCase__ : Tuple = f'resnet{"-".join(name.split("resnet" ) )}'
print(snake_case )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case , )
# we can use the convnext one
UpperCAmelCase__ : List[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case , )
print(f'Pushed {checkpoint_name}' )
def SCREAMING_SNAKE_CASE__ ( snake_case : Path , snake_case : str = None , snake_case : bool = True )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = "imagenet-1k-id2label.json"
UpperCAmelCase__ : Union[str, Any] = 1000
UpperCAmelCase__ : List[Any] = (1, num_labels)
UpperCAmelCase__ : Optional[int] = "huggingface/label-files"
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : str = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase__ : Optional[Any] = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Dict = idalabel
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : Optional[Any] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )
UpperCAmelCase__ : int = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(snake_case , names_to_config[model_name] , snake_case , snake_case )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(snake_case , snake_case , snake_case , snake_case )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
_lowerCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
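# Programmatic invocation sketch, bypassing argparse: convert a single timm
# checkpoint into a local directory without pushing to the Hub. The output path
# is illustrative; the timm weights are downloaded on first use.
#
#   from pathlib import Path
#   dump_dir = Path("converted_resnets")
#   dump_dir.mkdir(exist_ok=True, parents=True)
#   convert_weights_and_push(dump_dir, "resnet18", False)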
| 369
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
            if img.dtype == np.uint8:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        UpperCAmelCase__ : List[str] = lambda x : (x - self.pixel_mean) / self.pixel_std
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case )
tensor[:, 1].clamp_(min=0 , max=snake_case )
tensor[:, 2].clamp_(min=0 , max=snake_case )
tensor[:, 3].clamp_(min=0 , max=snake_case )
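# A worked example of the box-rescaling math above, under a hypothetical name
# for this sketch: boxes are (x0, y0, x1, y1), x coordinates are multiplied by
# scale_yx[:, 1] and y coordinates by scale_yx[:, 0].
import torch


def _rescale_boxes_example(boxes: torch.Tensor, scale_yx: torch.Tensor) -> torch.Tensor:
    boxes = boxes.clone()
    boxes[:, 0::2] *= scale_yx[:, 1]  # x0, x1
    boxes[:, 1::2] *= scale_yx[:, 0]  # y0, y1
    return boxes


if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
    scale_yx = torch.tensor([[0.5, 2.0]])  # (scale_y, scale_x)
    print(_rescale_boxes_example(boxes, scale_yx))  # tensor([[ 20.,  10., 220., 110.]])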
| 298
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Tuple , snake_case__ : List[str]=7 , snake_case__ : List[str]=3 , snake_case__ : List[str]=3_0 , snake_case__ : Any=4_0_0 , snake_case__ : str=True , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=[0.5, 0.5, 0.5] , snake_case__ : List[str]=[0.5, 0.5, 0.5] , snake_case__ : int=True , snake_case__ : Optional[Any]=1 / 2_5_5 , snake_case__ : Optional[Any]=True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Union[str, Any] = max_resolution
UpperCAmelCase__ : Optional[Any] = do_resize
UpperCAmelCase__ : Union[str, Any] = size
UpperCAmelCase__ : List[str] = do_normalize
UpperCAmelCase__ : List[Any] = image_mean
UpperCAmelCase__ : List[Any] = image_std
UpperCAmelCase__ : List[str] = do_rescale
UpperCAmelCase__ : Dict = rescale_factor
UpperCAmelCase__ : Dict = do_pad
def __a ( self : Tuple ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any]=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase__ : int = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
                UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = image.size
            else:
                UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Optional[Any] = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase__ : Tuple = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase__ : Optional[int] = self.size["shortest_edge"]
UpperCAmelCase__ : Any = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase__ : Union[str, Any] = self.size["shortest_edge"]
UpperCAmelCase__ : Tuple = self.size["shortest_edge"]
else:
UpperCAmelCase__ : List[str] = []
for image in image_inputs:
                UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            UpperCAmelCase__ : str = max(snake_case__ , key=lambda item : item[0] )[0]
            UpperCAmelCase__ : List[str] = max(snake_case__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =DeformableDetrImageProcessor if is_vision_available() else None
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = DeformableDetrImageProcessingTester(self )
@property
def __a ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "do_rescale" ) )
self.assertTrue(hasattr(snake_case__ , "do_pad" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=snake_case__ )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
UpperCAmelCase__ : Tuple = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : str = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCAmelCase__ : Optional[Any] = json.loads(f.read() )
UpperCAmelCase__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
UpperCAmelCase__ : Optional[Any] = DeformableDetrImageProcessor()
UpperCAmelCase__ : Tuple = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="pt" )
# verify pixel values
UpperCAmelCase__ : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
UpperCAmelCase__ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
UpperCAmelCase__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
UpperCAmelCase__ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify orig_size
UpperCAmelCase__ : Tuple = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
UpperCAmelCase__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCAmelCase__ : List[str] = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
UpperCAmelCase__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
UpperCAmelCase__ : List[Any] = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="pt" )
# verify pixel values
UpperCAmelCase__ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
UpperCAmelCase__ : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Optional[int] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
UpperCAmelCase__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
UpperCAmelCase__ : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
UpperCAmelCase__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify masks
UpperCAmelCase__ : List[Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , snake_case__ )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
| 370
|
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE__ ( bita : int , bitb : int )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" )
    UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    UpperCAmelCase__ : Optional[int] = qiskit.execute(qc_ha , simulator , shots=1000 )
# Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298
| 0
|
"""simple docstring"""
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Dict , snake_case : List[str] )-> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase__ : Optional[Any] = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
UpperCAmelCase__ : int = f'{src_lang}-{tgt_lang}'
UpperCAmelCase__ : List[Any] = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(snake_case , exist_ok=snake_case )
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , "README.md" )
print(f'Generating {path}' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(snake_case )
# make sure we are under the root of the project
_lowerCAmelCase : int = Path(__file__).resolve().parent.parent.parent
_lowerCAmelCase : Any = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model_name.split("""-""")
_lowerCAmelCase : Dict = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
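# Illustrative one-off invocation: write a single card into a temporary
# directory instead of the repo tree (the language pair must be one of the
# four wmt19 pairs handled above).
#
#   import tempfile
#   with tempfile.TemporaryDirectory() as tmp:
#       write_model_card(tmp, src_lang="en", tgt_lang="ru")  # writes tmp/README.md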
| 371
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
        UpperCAmelCase__ : Optional[Any] = num_meta3d_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
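# Quick instantiation sketch: the released class is exposed by transformers as
# EfficientFormerConfig (assumed import path); defaults match the signature
# above and any field can be overridden by keyword.
if __name__ == "__main__":
    from transformers import EfficientFormerConfig

    config = EfficientFormerConfig(hidden_act="relu")
    print(config.depths)  # [3, 2, 6, 4]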
| 298
| 0
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a__ ( lowerCAmelCase__ ) -> List[str]:
for param in module.parameters():
UpperCAmelCase__ : List[str] = False
def a__ ( ) -> Optional[Any]:
UpperCAmelCase__ : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase__ : Union[str, Any] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Dict = plt.imshow(lowerCAmelCase__ )
fig.axes.get_xaxis().set_visible(lowerCAmelCase__ )
fig.axes.get_yaxis().set_visible(lowerCAmelCase__ )
plt.show()
def a__ ( ) -> Dict:
UpperCAmelCase__ : List[str] = datetime.now()
UpperCAmelCase__ : Optional[int] = current_time.strftime('''%H:%M:%S''' )
return timestamp
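# Usage sketch for the helpers above, with the freeze step spelled out inline
# (the first helper sets requires_grad to False for every parameter of the
# module it receives; names here are illustrative).
from torch import nn

if __name__ == "__main__":
    layer = nn.Linear(4, 2)
    for param in layer.parameters():
        param.requires_grad = False  # what the freeze helper does per parameter
    print(all(not p.requires_grad for p in layer.parameters()))  # True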
| 299
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = XLMRobertaTokenizer
lowerCAmelCase__ = XLMRobertaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = '''<pad>'''
UpperCAmelCase__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 1_002 )
def lowercase_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A )
UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowercase_ ( self : str ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A )
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A )
                # Checks it saves with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A )
UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@cached_property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_A , f.name )
UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A )
UpperCAmelCase__ : str = pickle.dumps(_A )
pickle.loads(_A )
def lowercase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : int = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase__ : Any = self.get_rust_tokenizer()
UpperCAmelCase__ : List[Any] = tokenizer.encode(_A )
UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = '''Hello World!'''
UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase__ : Any = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 299
| 1
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
UpperCamelCase__ = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself  This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank, which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
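# Illustrative sketch (not part of the original metric): how the CoNLL columns
# described above map onto one line. Only columns 4, 5, 6 and the last column
# (word, POS, parse bit, coreference) are consumed by this wrapper.
_example_conll_line = "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -"
_cols = _example_conll_line.split()
assert (_cols[3], _cols[4], _cols[5], _cols[-1]) == ("Thank", "VBP", "(TOP(S(VP*", "-")
del _example_conll_line, _cols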
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if parse_col != "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval( datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        '''simple docstring'''
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=all_metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
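    # A hand-worked check of the recursion above (illustrative; not part of the
    # original snippet): with the eight scores used in main(), the maximizing
    # root evaluates max(min(90, 33), min(65, 34423)) = 65.
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65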
| 299
| 1
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = 0
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            config = Wav2Vec2FeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
UpperCAmelCase__ : str = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Any = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
UpperCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoFeatureExtractor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ : int = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
        class NewFeatureExtractor( CustomFeatureExtractor ):
            is_local = True
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
# If remote code is not set, the default is to use local
UpperCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(_A , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 299
|
'''simple docstring'''
class CircularQueue:
    def __init__( self , n : int ):
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ):
        '''simple docstring'''
        return self.size
    def is_empty( self ):
        '''simple docstring'''
        return self.size == 0
    def first( self ):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data : int ):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
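# A brief usage sketch of the ring buffer above (illustrative; it uses the
# names reconstructed in this snippet, which are an editorial assumption):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
    assert len(queue) == 3 and queue.first() == 10
    assert queue.dequeue() == 10  # front advances modulo n, freeing index 0
    queue.enqueue(40)  # rear wraps around to the freed slot
    assert queue.dequeue() == 20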
| 299
| 1
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''<s>'''
UpperCAmelCase__ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_A ) , 1_004 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def lowercase_ ( self : str ):
'''simple docstring'''
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase__ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
        txt = '''Hello World!'''
        tokens = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(tokens , self.big_tokenizer.encode(txt ) )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase__ : Optional[int] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=False )
        config = BigBirdConfig(attention_type='''original_full''' )
        model = BigBirdModel(config )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
        tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
        decoded_text = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 299
|
'''simple docstring'''
def selection_sort(collection : list ) -> list:
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
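# A minimal sanity check of the function above (illustrative; not part of the
# original snippet): selection sort mutates the list in place and returns it.
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []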
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 299
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def lowercase_ ( self : Any ):
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase__ : Any = np.asarray(_A )
UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
@require_torch
def lowercase_ ( self : List[str] ):
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
'''simple docstring'''
from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1e-4 ) )
| 299
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__( self , value : int | None = None ):
        '''simple docstring'''
        self.value = value
        self.parent : Node | None = None  # Added in order to delete a node easier
        self.left : Node | None = None
        self.right : Node | None = None
    def __repr__( self ):
        '''simple docstring'''
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__( self , root : Node | None = None ):
        '''simple docstring'''
        self.root = root
    def __str__( self ):
        '''simple docstring'''
        return str(self.root )
    def __reassign_nodes( self , node : Node , new_children : Node | None ):
        '''simple docstring'''
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node : Node ):
        '''simple docstring'''
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        '''simple docstring'''
        return self.root is None
    def __insert( self , value ):
        '''simple docstring'''
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ):
        '''simple docstring'''
        for value in values:
            self.__insert(value )
    def search( self , value ):
        '''simple docstring'''
        if self.empty():
            raise IndexError('''Warning: Tree is empty! Please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node : Node | None = None ):
        '''simple docstring'''
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node : Node | None = None ):
        '''simple docstring'''
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value : int ):
        '''simple docstring'''
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node : Node | None ):
        '''simple docstring'''
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ):
        '''simple docstring'''
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr : list , node : Node | None ):
        '''simple docstring'''
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k : int , node : Node ):
        '''simple docstring'''
        arr : list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node ):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def main() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('''The value 6 exists''' )
    else:
        print('''The value 6 doesn\'t exist''' )
    if t.search(-1 ) is not None:
        print('''The value -1 exists''' )
    else:
        print('''The value -1 doesn\'t exist''' )
    if not t.empty():
        print('''Max Value: ''' , t.get_max().value )  # type: ignore
        print('''Min Value: ''' , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299
| 1
|
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer( PreTrainedTokenizer ):
lowerCAmelCase__ = ["input_ids"]
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def lowercase_ ( self : int , _A : List[str] ):
'''simple docstring'''
if text is None:
return None
UpperCAmelCase__ : Any = self.tokenize(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = '''''', []
for i, ch in enumerate(_A ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase__ : int = self.SP_CHAR_MAPPING.get(_A )
else:
UpperCAmelCase__ : Dict = unicodedata.normalize('''NFKC''' , _A )
if self.is_whitespace(_A ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_A ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase__ : Dict = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase__ : Optional[Any] = token[1:]
UpperCAmelCase__ : int = text[offset:].index(_A ) + offset
UpperCAmelCase__ : Tuple = start + len(_A )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase__ : Any = end
return token_mapping
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.vocab )
def lowercase_ ( self : int ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ):
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text( self , text ):
        '''simple docstring'''
        return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
def lowercase_ ( self : Any , _A : List[str] , _A : Optional[int]=False , _A : str=64 , _A : List[Any]=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase__ : Tuple = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase__ : List[Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase__ : Dict = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase__ : Optional[int] = self.sp_model.EncodeAsPieces(_A )
else:
UpperCAmelCase__ : Dict = self.sp_model.SampleEncodeAsPieces(_A , _A , _A )
UpperCAmelCase__ : Tuple = []
for pi, piece in enumerate(_A ):
if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
new_pieces.append(_A )
continue
else:
continue
UpperCAmelCase__ : List[Any] = 0
for i, chunk in enumerate(_A ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_A ) or self.is_punct(_A ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_A )
UpperCAmelCase__ : str = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : Optional[Any] = i
if len(_A ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
    def convert_ids_to_string( self , ids ):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids )
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def lowercase_ ( self : Tuple , _A : Optional[int] ):
'''simple docstring'''
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def lowercase_ ( self : List[str] , _A : str ):
'''simple docstring'''
return self.reverse_vocab.get(_A , self.unk_token )
def lowercase_ ( self : Any , _A : Optional[Any] , _A : str=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowercase_ ( self : int , _A : Any , _A : Tuple=None , _A : Optional[int]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_A ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_A ) + 1) + [1] * (len(_A ) + 3)
def lowercase_ ( self : Dict , _A : Tuple ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowercase_ ( self : Union[str, Any] , _A : Tuple ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowercase_ ( self : List[Any] , _A : Any ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowercase_ ( self : List[Any] , _A : str ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_A ) == 1:
UpperCAmelCase__ : Optional[Any] = unicodedata.category(_A )
if cat == "Zs":
return True
return False
    def load_vocab( self , filepath ):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(tokenizer_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
| 299
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
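    # Walk the dotted HF key with getattr to reach the target module, verify the
    # fairseq tensor's shape against the destination, then copy it into the slot
    # selected by `weight_type` (weight/bias/batch-norm stats, or .data directly).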
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Tuple = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : int = value
elif weight_type == "running_mean":
UpperCAmelCase__ : int = value
elif weight_type == "running_var":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
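# Illustrative usage (hedged: the S2T ignore list above is bound to an obfuscated
# variable in this dump, originally named IGNORE_KEYS_S2T):
#   should_ignore("text_encoder_prenet.embed_tokens.weight", IGNORE_KEYS_S2T)
#   -> True, because the "text_encoder_prenet.*" pattern matches the name's prefix.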
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
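    # Pick the weight mapping and ignore list for the requested task (s2t / t2s /
    # s2s), route conv feature-encoder weights to load_conv_layer, and resolve
    # "*" wildcards in the mapped keys to the matching layer index or suffix.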
UpperCAmelCase__ : int = []
if task == "s2t":
UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : List[Any] = MAPPING_S2T
UpperCAmelCase__ : int = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Tuple = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
UpperCAmelCase__ : List[str] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : Dict = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Union[str, Any] = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
UpperCAmelCase__ : Optional[int] = '''weight'''
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked'''
else:
UpperCAmelCase__ : Union[str, Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
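    # type_id 0 addresses the conv weight/bias of the feature encoder; type_id 2
    # addresses its layer norm (only layer 0 when group norm is used). Anything
    # else is collected in `unused_weights` for the final warning.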
UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Any = int(items[0] )
UpperCAmelCase__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any:
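    # Build the task-specific SpeechT5 model (optionally from a config file),
    # recreate the tokenizer/processor from a SentencePiece vocab if given, port
    # the fairseq weights, save everything, and optionally push to the Hub.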
if config_path is not None:
UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : str = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : str = config.max_text_positions
UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : int = 6_00
UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions
UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : Optional[Any] = config.max_speech_positions
UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
UpperCAmelCase__ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 299
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 299
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : int = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Tuple = value
else:
UpperCAmelCase__ : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
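    # Same porting loop as the other fairseq converters, with one extra guard:
    # "layer_norm_for_extract" is skipped unless it matches the key exactly,
    # because its name is nearly identical to the encoder layer norms.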
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Dict = fairseq_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Any = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : List[str] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Dict = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ : Tuple = '''weight'''
else:
UpperCAmelCase__ : Optional[Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any:
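    # Instantiate either the CTC (fine-tuned) or pre-training UniSpeechSat model,
    # load the fairseq checkpoint through checkpoint_utils, port its weights, and
    # save the resulting Hugging Face model.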
if config_path is not None:
UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : int = UniSpeechSatConfig()
UpperCAmelCase__ : Tuple = ''''''
if is_finetuned:
UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCAmelCase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 299
| 1
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCamelCase__ = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
UpperCamelCase__ = json.load(f)
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int , _A : Dict ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(_A )
def lowercase_ ( self : Tuple , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = FSMTForConditionalGeneration.from_pretrained(_A ).to(_A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def lowercase_ ( self : Dict , _A : Optional[int] , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = f"""facebook/wmt19-{pair}"""
UpperCAmelCase__ : List[Any] = self.get_tokenizer(_A )
UpperCAmelCase__ : List[str] = self.get_model(_A )
UpperCAmelCase__ : List[Any] = bleu_data[pair]['''src''']
UpperCAmelCase__ : Optional[int] = bleu_data[pair]['''tgt''']
UpperCAmelCase__ : Union[str, Any] = tokenizer(_A , return_tensors='''pt''' , truncation=_A , padding='''longest''' ).to(_A )
UpperCAmelCase__ : Optional[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase__ : Any = tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
UpperCAmelCase__ : List[str] = calculate_bleu(_A , _A )
print(_A )
self.assertGreaterEqual(scores['''bleu'''] , _A )
| 299
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase__ = random.Random()
if is_torch_available():
import torch
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]:
if rng is None:
UpperCAmelCase__ : List[str] = global_rng
UpperCAmelCase__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = min_seq_length
UpperCAmelCase__ : str = max_seq_length
UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase__ : Optional[Any] = feature_size
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : int = sampling_rate
UpperCAmelCase__ : Tuple = return_attention_mask
UpperCAmelCase__ : str = do_normalize
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ):
'''simple docstring'''
def _flatten(_A : Union[str, Any] ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = ASTFeatureExtractor
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = ASTFeatureExtractionTester(self )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase__ : Any = np.asarray(_A )
UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
@require_torch
def lowercase_ ( self : List[str] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa )
UpperCAmelCase__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowercase_ ( self : int , _A : List[Any] ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
        # fmt: off
        UpperCAmelCase__ : Any = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 )
UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor()
UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
| 299
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 299
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 0
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
with pytest.raises(_A ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values()
UpperCAmelCase__ : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A )
@require_tokenizers
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A )
UpperCAmelCase__ : Any = '''Hello, world. How are you?'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_A , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase__ : Tuple = get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : Any ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
        # We pass through a BERT fast tokenizer because there is no slow-to-fast converter
        # for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = False
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use local
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : Optional[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : int = value
elif weight_type == "bias":
UpperCAmelCase__ : Union[str, Any] = value
else:
UpperCAmelCase__ : Any = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
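    # In addition to the usual mapping pass, capture fairseq's top-level "proj"
    # module (the encoder-to-decoder projection) and return it so the caller can
    # graft it onto the SpeechEncoderDecoderModel.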
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[Any] = fairseq_model.state_dict()
UpperCAmelCase__ : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase__ : Tuple = None
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : List[Any] = True
elif name.split('''.''' )[0] == "proj":
UpperCAmelCase__ : Optional[Any] = fairseq_model.proj
UpperCAmelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase__ : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase__ : int = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : int = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : str = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Tuple = '''bias'''
elif "weight" in name:
UpperCAmelCase__ : Optional[int] = '''weight'''
else:
UpperCAmelCase__ : int = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : List[Any] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : List[str] = name.split('''.''' )
UpperCAmelCase__ : Optional[int] = int(items[0] )
UpperCAmelCase__ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase__ : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase__ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase__ : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Dict:
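    # Turn an embedding matrix into a bias-free linear output layer with tied
    # weights (weight sharing between the input embeddings and the LM head).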
UpperCAmelCase__ , UpperCAmelCase__ : int = emb.weight.shape
UpperCAmelCase__ : List[str] = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
UpperCAmelCase__ : Dict = emb.weight.data
return lin_layer
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
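    # Read a fairseq dict.txt ("token count" per line), reserve ids 0-3 for the
    # special tokens, and number the remaining tokens from 4 upward.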
with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase__ : Optional[Any] = f.readlines()
UpperCAmelCase__ : str = [line.split(''' ''' )[0] for line in lines]
UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
UpperCAmelCase__ : int = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(lowerCAmelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Any:
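    # Assemble a SpeechEncoderDecoderModel from a wav2vec2 encoder and a
    # Speech2Text2 causal decoder, port the fairseq weights (including the
    # encoder-to-decoder projection), and write out vocab, tokenizer, feature
    # extractor, and model.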
UpperCAmelCase__ : List[str] = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = SpeechaTextaConfig.from_pretrained(
lowerCAmelCase__ , vocab_size=lowerCAmelCase__ , decoder_layers=lowerCAmelCase__ , do_stable_layer_norm=lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCAmelCase__ : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase__ : Dict = WavaVecaModel(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase__ )
UpperCAmelCase__ : str = SpeechaTextaForCausalLM(lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase__ )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
UpperCAmelCase__ : Optional[Any] = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is initialized to the identity matrix, so leaving it unchanged is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase__ : Union[str, Any] = SpeechEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
UpperCAmelCase__ : Dict = False
# add projection layer
UpperCAmelCase__ : str = nn.Parameter(projection_layer.weight )
UpperCAmelCase__ : Dict = nn.Parameter(projection_layer.bias )
UpperCAmelCase__ : Optional[Any] = create_vocab_dict(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = SpeechaTextaTokenizer(os.path.join(lowerCAmelCase__ , '''vocab.json''' ) )
tokenizer.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = hf_wavavec.config.to_dict()
UpperCAmelCase__ : Union[str, Any] = tokenizer.pad_token_id
UpperCAmelCase__ : Optional[Any] = tokenizer.bos_token_id
UpperCAmelCase__ : Tuple = tokenizer.eos_token_id
UpperCAmelCase__ : Optional[Any] = '''speech_to_text_2'''
UpperCAmelCase__ : Union[str, Any] = '''wav2vec2'''
UpperCAmelCase__ : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
feature_extractor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
UpperCamelCase__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 299
|
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> float:
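    # Friedmann equation: H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3
    # + Ok*(1+z)^2 + OL), with curvature Ok = 1 - (Om + Or + OL).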
UpperCAmelCase__ : Tuple = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
UpperCAmelCase__ : List[str] = 1 - (matter_density + radiation_density + dark_energy)
UpperCAmelCase__ : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
UpperCAmelCase__ : Any = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
UpperCamelCase__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 299
| 1
|
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=10_24 , lowerCAmelCase__=10_24 , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Dict:
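    # Tokenize the train and validation Seq2SeqDatasets and pickle each example's
    # max token length to the dataset's `len_file` (used for length-based
    # batching/sampling downstream).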
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : int = SeqaSeqDataset(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , type_path='''train''' , **lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = tok.pad_token_id
def get_lens(lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = tqdm(
DataLoader(lowerCAmelCase__ , batch_size=5_12 , num_workers=8 , shuffle=lowerCAmelCase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase__ : Dict = []
for batch in dl:
UpperCAmelCase__ : int = batch['''input_ids'''].ne(lowerCAmelCase__ ).sum(1 ).tolist()
UpperCAmelCase__ : List[str] = batch['''labels'''].ne(lowerCAmelCase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
max_lens.append(max(lowerCAmelCase__ , lowerCAmelCase__ ) )
else:
max_lens.extend(lowerCAmelCase__ )
return max_lens
UpperCAmelCase__ : List[Any] = get_lens(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = SeqaSeqDataset(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , type_path='''val''' , **lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = get_lens(lowerCAmelCase__ )
pickle_save(lowerCAmelCase__ , train_ds.len_file )
pickle_save(lowerCAmelCase__ , val_ds.len_file )
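# Minimal illustration of the length computation used in get_lens above: count
# the tokens that differ from the pad id along the sequence dimension. The
# tensors here are made up for demonstration.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])
lengths = input_ids.ne(pad_token_id).sum(1).tolist()  # non-pad token count per row
assert lengths == [3, 2]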
if __name__ == "__main__":
fire.Fire(save_len_file)
| 299
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 299
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : Dict = args.pruning_method
UpperCAmelCase__ : List[Any] = args.threshold
UpperCAmelCase__ : Any = args.model_name_or_path.rstrip('''/''' )
UpperCAmelCase__ : int = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
UpperCAmelCase__ : Optional[Any] = torch.load(os.path.join(lowerCAmelCase__ , '''pytorch_model.bin''' ) )
UpperCAmelCase__ : Optional[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
UpperCAmelCase__ : Optional[Any] = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
UpperCAmelCase__ : Optional[int] = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
UpperCAmelCase__ : List[Any] = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
UpperCAmelCase__ : str = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
UpperCAmelCase__ : Dict = name[:-6]
UpperCAmelCase__ : Union[str, Any] = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase__ : str = TopKBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
UpperCAmelCase__ : int = name[:-6]
UpperCAmelCase__ : List[Any] = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase__ : Optional[int] = ThresholdBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : str = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
UpperCAmelCase__ : Dict = name[:-6]
UpperCAmelCase__ : Optional[Any] = model[F"""{prefix_}mask_scores"""]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = -0.1, 1.1
UpperCAmelCase__ : List[str] = torch.sigmoid(lowerCAmelCase__ )
UpperCAmelCase__ : str = s * (r - l) + l
UpperCAmelCase__ : Optional[int] = s_bar.clamp(min=0.0 , max=1.0 )
UpperCAmelCase__ : Union[str, Any] = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
UpperCAmelCase__ : Any = os.path.join(
os.path.dirname(lowerCAmelCase__ ) , F"""bertarized_{os.path.basename(lowerCAmelCase__ )}""" )
if not os.path.isdir(lowerCAmelCase__ ):
shutil.copytree(lowerCAmelCase__ , lowerCAmelCase__ )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
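# Isolated sketch of the "stretched sigmoid" mask built in the l0 branch above.
# The (-0.1, 1.1) stretch bounds follow the code; the function name and the
# example tensor are illustrative only.
def l0_mask_sketch(mask_scores, l=-0.1, r=1.1):
    s = torch.sigmoid(mask_scores)        # squash scores into (0, 1)
    s_bar = s * (r - l) + l               # stretch into (l, r)
    return s_bar.clamp(min=0.0, max=1.0)  # clamp so exact 0/1 mask entries are reachable

_mask = l0_mask_sketch(torch.tensor([-5.0, 0.0, 5.0]))
assert _mask[0] == 0.0 and 0.0 < _mask[1] < 1.0 and _mask[2] == 1.0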
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
        help='''Folder where the pruned model will be saved''',
)
UpperCamelCase__ = parser.parse_args()
main(args)
| 299
|
'''simple docstring'''
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[float, list[float]]:
UpperCAmelCase__ : Optional[Any] = list(range(len(lowerCAmelCase__ ) ) )
UpperCAmelCase__ : Optional[Any] = [v / w for v, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
UpperCAmelCase__ : float = 0
UpperCAmelCase__ : list[float] = [0] * len(lowerCAmelCase__ )
for i in index:
if weight[i] <= capacity:
UpperCAmelCase__ : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase__ : Tuple = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
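# De-obfuscated sketch of the same greedy fractional-knapsack routine with
# readable names (all illustrative): sort items by value/weight ratio, take
# whole items while they fit, then a fraction of the first item that does not.
def fractional_knapsack_sketch(values, weights, capacity):
    order = sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(values)
    for i in order:
        if weights[i] <= capacity:
            fractions[i] = 1.0          # take the whole item
            max_value += values[i]
            capacity -= weights[i]
        else:
            fractions[i] = capacity / weights[i]  # take a fraction of this item
            max_value += values[i] * fractions[i]
            break
    return max_value, fractions

best, _ = fractional_knapsack_sketch([60, 100, 120], [10, 20, 30], 50)
assert abs(best - 240.0) < 1e-9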
| 299
| 1
|
'''simple docstring'''
import string
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Any = ''''''
for i in sequence:
UpperCAmelCase__ : List[Any] = ord(lowerCAmelCase__ )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : List[Any] = string.ascii_letters
UpperCAmelCase__ : Tuple = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
def a__ ( ) -> None:
from timeit import timeit
print('''Running performance benchmarks...''' )
UpperCAmelCase__ : Dict = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowerCAmelCase__ )} seconds""" )
print(F"""> atbash(): {timeit("atbash(printable)" , setup=lowerCAmelCase__ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
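# The same Atbash mapping can also be expressed with a single translation
# table; this sketch (names illustrative) mirrors the str.translate variant above.
ATBASH_TABLE = str.maketrans(
    string.ascii_lowercase + string.ascii_uppercase,
    string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
)

def atbash_sketch(text: str) -> str:
    return text.translate(ATBASH_TABLE)

assert atbash_sketch("ABC xyz") == "ZYX cba"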
| 299
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_A : Any , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
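# Simplified stand-in showing the pattern these dummy classes implement: any
# instantiation (or classmethod call) raises a helpful error until the listed
# backends are installed. This sketch is illustrative and does not use the
# library's real metaclass/helper machinery.
class _DummyOnnxObjectSketch:
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires the backends: {', '.join(self._backends)}")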
| 299
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'swin2sr'
lowerCAmelCase__ = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[Any] , _A : Optional[int]=64 , _A : Optional[int]=1 , _A : Any=3 , _A : str=180 , _A : List[Any]=[6, 6, 6, 6, 6, 6] , _A : str=[6, 6, 6, 6, 6, 6] , _A : Optional[int]=8 , _A : Any=2.0 , _A : int=True , _A : Tuple=0.0 , _A : Dict=0.0 , _A : List[str]=0.1 , _A : Optional[int]="gelu" , _A : int=False , _A : Tuple=0.0_2 , _A : int=1e-5 , _A : List[Any]=2 , _A : str=1.0 , _A : int="1conv" , _A : Any="pixelshuffle" , **_A : Any , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Union[str, Any] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Tuple = embed_dim
UpperCAmelCase__ : Union[str, Any] = depths
UpperCAmelCase__ : Optional[int] = len(_A )
UpperCAmelCase__ : Any = num_heads
UpperCAmelCase__ : Union[str, Any] = window_size
UpperCAmelCase__ : List[str] = mlp_ratio
UpperCAmelCase__ : Any = qkv_bias
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = drop_path_rate
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : Optional[Any] = use_absolute_embeddings
UpperCAmelCase__ : str = layer_norm_eps
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : List[Any] = upscale
UpperCAmelCase__ : Union[str, Any] = img_range
UpperCAmelCase__ : Union[str, Any] = resi_connection
UpperCAmelCase__ : str = upsampler
| 299
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 299
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : str , _A : bool = True , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : Dict[str, int] = None , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Optional[int] = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Optional[int] = get_size_dict(_A )
UpperCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Any = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Any = do_normalize
UpperCAmelCase__ : Tuple = do_center_crop
UpperCAmelCase__ : Dict = crop_size
UpperCAmelCase__ : Optional[Any] = size
UpperCAmelCase__ : Optional[Any] = resample
UpperCAmelCase__ : Tuple = rescale_factor
UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase_ ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = get_size_dict(_A )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase__ : Optional[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ):
'''simple docstring'''
UpperCAmelCase__ : Any = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : Union[str, Any] , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Union[str, Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : int = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : Dict , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
UpperCAmelCase__ : str = resample if resample is not None else self.resample
UpperCAmelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : str = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Dict = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Optional[int] = size if size is not None else self.size
UpperCAmelCase__ : Tuple = get_size_dict(_A )
if not is_batched(_A ):
UpperCAmelCase__ : Any = [images]
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[Any] = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Any = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCAmelCase__ : Union[str, Any] = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : int = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
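# Miniature numpy walk-through of the preprocess pipeline order above, with
# resize and center-crop omitted and made-up values: rescale to [0, 1], then
# normalize with scalar mean/std stand-ins.
import numpy as np

_image = np.full((4, 4, 3), 127.5, dtype=np.float32)
_rescaled = _image * (1 / 255)           # rescale step
_normalized = (_rescaled - 0.5) / 0.5    # normalize step with scalar stats
assert abs(float(_normalized.mean())) < 1e-6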
| 299
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCamelCase_ ( __a ):
def __get__( self : str , _A : Tuple , _A : List[str]=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__
UpperCAmelCase__ : Any = getattr(_A , _A , _A )
if cached is None:
UpperCAmelCase__ : Dict = self.fget(_A )
setattr(_A , _A , _A )
return cached
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
if is_torch_fx_proxy(lowerCAmelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase__ , np.ndarray )
def a__ ( lowerCAmelCase__ ) -> Any:
return isinstance(lowerCAmelCase__ , np.ndarray )
def a__ ( lowerCAmelCase__ ) -> int:
return _is_numpy(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
import torch
return isinstance(lowerCAmelCase__ , torch.Tensor )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
import torch
return isinstance(lowerCAmelCase__ , torch.device )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Any:
import torch
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return False
return isinstance(lowerCAmelCase__ , torch.dtype )
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> List[Any]:
import tensorflow as tf
return isinstance(lowerCAmelCase__ , tf.Tensor )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Any:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(lowerCAmelCase__ )
return type(lowerCAmelCase__ ) == tf.Tensor
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase__ , jnp.ndarray )
def a__ ( lowerCAmelCase__ ) -> List[Any]:
return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return [to_py_obj(lowerCAmelCase__ ) for o in obj]
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ ).tolist()
elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( lowerCAmelCase__ ) -> Tuple:
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return np.array(lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ )
else:
return obj
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name )
UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A , _A ):
UpperCAmelCase__ : List[Any] = first_field.items()
UpperCAmelCase__ : Optional[int] = True
else:
try:
UpperCAmelCase__ : Optional[int] = iter(_A )
UpperCAmelCase__ : Optional[int] = True
except TypeError:
UpperCAmelCase__ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A , (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0] , _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase__ : List[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase__ : List[str] = element[1]
elif first_field is not None:
UpperCAmelCase__ : Optional[Any] = first_field
else:
for field in class_fields:
UpperCAmelCase__ : Optional[int] = getattr(self , field.name )
if v is not None:
UpperCAmelCase__ : str = v
def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ):
'''simple docstring'''
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ):
'''simple docstring'''
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : List[str] , _A : Any ):
'''simple docstring'''
if isinstance(_A , _A ):
UpperCAmelCase__ : Union[str, Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : int , _A : Union[str, Any] , _A : str ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A , _A )
super().__setattr__(_A , _A )
def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ):
'''simple docstring'''
super().__setitem__(_A , _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class lowerCamelCase_ ( __a , __a ):
@classmethod
def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ):
'''simple docstring'''
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'longest'
lowerCAmelCase__ = 'max_length'
lowerCAmelCase__ = 'do_not_pad'
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'pt'
lowerCAmelCase__ = 'tf'
lowerCAmelCase__ = 'np'
lowerCAmelCase__ = 'jax'
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : List[ContextManager] ):
'''simple docstring'''
UpperCAmelCase__ : str = context_managers
UpperCAmelCase__ : int = ExitStack()
def __enter__( self : str ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__( self : Dict , *_A : List[Any] , **_A : str ):
'''simple docstring'''
self.stack.__exit__(*_A , **_A )
def a__ ( lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ )
if framework == "tf":
UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : Dict = model_class.__name__
UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ )
if framework == "tf":
UpperCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any:
def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
for k, v in d.items():
UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                yield from _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ )
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
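# Self-contained sketch of the same dict-flattening recursion with readable
# names (all illustrative); nested keys are joined with the delimiter.
def flatten_dict_sketch(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else str(k)
        if v and isinstance(v, dict):
            items.update(flatten_dict_sketch(v, key, delimiter))  # recurse into nested dicts
        else:
            items[key] = v
    return items

assert flatten_dict_sketch({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) == {"a": 1, "b.c": 2, "b.d.e": 3}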
@contextmanager
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
if is_numpy_array(lowerCAmelCase__ ):
return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.reshape(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.unsqueeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ ) -> int:
if is_numpy_array(lowerCAmelCase__ ):
return np.size(lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.size(lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
for key, value in auto_map.items():
if isinstance(lowerCAmelCase__ , (tuple, list) ):
UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase__ : str = F"""{repo_id}--{value}"""
return auto_map
def a__ ( lowerCAmelCase__ ) -> Tuple:
for base_class in inspect.getmro(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = base_class.__module__
UpperCAmelCase__ : Optional[int] = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 299
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'vit_mae'
def __init__( self : str , _A : Dict=768 , _A : List[str]=12 , _A : Optional[int]=12 , _A : Optional[int]=3_072 , _A : Optional[Any]="gelu" , _A : Tuple=0.0 , _A : Tuple=0.0 , _A : Optional[Any]=0.0_2 , _A : Optional[Any]=1e-12 , _A : Union[str, Any]=224 , _A : str=16 , _A : Dict=3 , _A : List[Any]=True , _A : Optional[int]=16 , _A : Any=512 , _A : str=8 , _A : int=2_048 , _A : Optional[Any]=0.7_5 , _A : int=False , **_A : int , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Tuple = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : str = patch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : Union[str, Any] = qkv_bias
UpperCAmelCase__ : Union[str, Any] = decoder_num_attention_heads
UpperCAmelCase__ : int = decoder_hidden_size
UpperCAmelCase__ : int = decoder_num_hidden_layers
UpperCAmelCase__ : Dict = decoder_intermediate_size
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = norm_pix_loss
| 299
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict:
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase__ : str = DatasetDict(
{
'''train''': dataset['''train'''].select(lowerCAmelCase__ ),
'''validation''': dataset['''train'''].select(lowerCAmelCase__ ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ : Dict = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ : Any = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ : Dict = 8
else:
UpperCAmelCase__ : List[Any] = None
return tokenizer.pad(
lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = DataLoader(
tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader, test_dataloader
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
# New Code #
UpperCAmelCase__ : List[str] = []
# Download the dataset
UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ : Any = config['''lr''']
UpperCAmelCase__ : Any = int(config['''num_epochs'''] )
UpperCAmelCase__ : Any = int(config['''seed'''] )
UpperCAmelCase__ : Dict = int(config['''batch_size'''] )
UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase__ : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase__ )
# New Code #
# Create our folds:
UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
UpperCAmelCase__ : Dict = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
UpperCAmelCase__ : Any = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Dict = outputs.loss
UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
UpperCAmelCase__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
# New Code #
# We also run predictions on the test set at the very end
UpperCAmelCase__ : int = []
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = outputs.logits
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 )
UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ )
def a__ ( ) -> Any:
UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' )
UpperCAmelCase__ : Tuple = parser.parse_args()
UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
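# Isolated sketch of the soft-voting ensembling done at the end of the
# training function above: per-fold logits are stacked, summed, divided by the
# fold count, and argmax'd into one prediction. Shapes and values here are
# illustrative.
fold_logits = [torch.randn(8, 2) for _ in range(3)]  # 3 folds, 8 examples, 2 classes
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
assert ensembled.shape == (8,)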
| 299
| 1
|
"""TrajectoryTransformer model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
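
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Instantiating the config with its defaults; `hidden_size` resolves to
# `n_embd` (128) through `attribute_map`, and `num_hidden_layers` to `n_layer`.
def _trajectory_config_demo():
    config = TrajectoryTransformerConfig()
    assert config.hidden_size == 128
    assert config.num_hidden_layers == config.n_layer == 4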
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : Tuple = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase__ : Optional[int] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
# load decoder from hub
UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder'''
def lowercase_ ( self : int , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : str , **_A : Any ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : str , **_A : Any ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) )
UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Optional[int] = processor(text=_A )
UpperCAmelCase__ : List[str] = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ):
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase__ : List[Any] = processor.decode(_A )
UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase_ ( self : Any , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A )
UpperCAmelCase__ : str = list(_A )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : Optional[int] = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : Optional[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[int] = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : str = -2_0.0
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : List[str] = os.listdir(_A )
UpperCAmelCase__ : Any = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : int = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def lowercase_ ( _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : int = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : str = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : List[Any] = iter(_A )
UpperCAmelCase__ : Optional[Any] = next(_A )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
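
# --- Illustrative sketch (editor addition, not part of the original file) ---
# Quick numeric check of the pieces above: sigmoid(0) is 0.5, and the logistic
# cost of predicting 0.5 for any mix of 0/1 labels is exactly log(2) ≈ 0.693.
def _logistic_demo():
    assert sigmoid_function(0) == 0.5
    h = np.array([0.5, 0.5])
    y = np.array([0, 1])
    assert abs(cost_function(h, y) - np.log(2)) < 1e-9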
# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
"""Processor class for TVLT."""

from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        # Forwards `images` to the image processor and `audio` to the feature
        # extractor, then merges their output dictionaries.
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
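
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Hypothetical usage: the arguments are assumed to be TvltImageProcessor /
# TvltFeatureExtractor instances plus raw video frames and an audio waveform.
# The returned dict is the union of the image and audio model inputs.
def _tvlt_processor_demo(image_processor, feature_extractor, video_frames, waveform):
    processor = TvltProcessor(image_processor, feature_extractor)
    outputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
    return outputs.keys()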
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''bert-base-cased'''
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = '''bert-base-cased'''
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
@require_tensorflow_probability
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = copy.deepcopy(model.config )
UpperCAmelCase__ : Tuple = ['''FunnelBaseModel''']
UpperCAmelCase__ : int = TFAutoModel.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register('''new-model''' , _A )
UpperCAmelCase__ : List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
auto_class.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config()
UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase__ : str = auto_class.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = auto_class.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowercase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
"""EfficientFormer model configuration"""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
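
# --- Illustrative sketch (editor addition, not part of the original module) ---
# Instantiating the config with its defaults and overriding a single field.
def _efficientformer_config_demo():
    config = EfficientFormerConfig(image_size=192)
    assert config.hidden_sizes == [48, 96, 224, 448]
    assert config.image_size == 192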
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
"""Convert GroupViT checkpoints from the original NVlabs repository to the Hugging Face format."""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.0.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection.0")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
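
# --- Illustrative sketch (editor addition, not part of the original script) ---
# What the qkv split above does: a fused projection of shape (3 * dim, dim) is
# cut row-wise into separate query, key and value matrices. Toy dimensions only.
def _qkv_split_demo():
    dim = 4
    qkv = torch.randn(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)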
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak the model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
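# Usage sketch (an illustrative addition, not part of the original script; the
# script and checkpoint filenames below are hypothetical):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc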
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
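# Illustrative usage (an addition, not part of the original module). For a text
# in which the 27 symbols (space plus a-z) were uniformly distributed, the
# printed first-order entropy would approach log2(27), about 4.75 bits per
# symbol:
#
#   >>> calculate_prob("the quick brown fox jumps over the lazy dog")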
| 299
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {'''input_ids''': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
| 299
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
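# Usage sketch (an illustrative addition; the checkpoint name is the one from
# the pretrained map above):
#
#   from transformers import BlenderbotSmallTokenizerFast
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   input_ids = tokenizer("sam is a great name.")["input_ids"]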
| 299
| 1
|
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A )
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A )
UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@cached_property
    def big_tokenizer(self):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 299
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
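# Worked example (an illustrative addition): for "2 3 + 4 *" the stack holds
# [2, 3], collapses to [5] on "+", then to [20] on "*":
#
#   >>> evaluate_postfix(["2", "3", "+", "4", "*"])
#   20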
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
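# Worked example (added for clarity): with the scores above and height
# log2(8) = 3, the depth-2 maxima are [90, 33, 65, 34423], the depth-1 minima
# are [33, 65], and the maximizing root picks 65:
#
#   >>> minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3)
#   65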
| 299
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientformer'''] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientformer'''] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_efficientformer'''] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
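# Illustrative note (not part of the original file): with the _LazyModule
# pattern above, importing the package stays cheap; the heavy torch/TF modules
# are only loaded when one of their exported names is first accessed, e.g.:
#
#   from transformers.models.efficientformer import EfficientFormerConfig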
| 299
|
'''simple docstring'''
class CircularQueue:
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        return self.size
    def is_empty(self) -> bool:
        return self.size == 0
    def first(self):
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
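# Quick usage example (an addition, not in the original file):
#
#   >>> q = CircularQueue(3)
#   >>> len(q.enqueue(1).enqueue(2))  # enqueue returns self, so calls chain
#   2
#   >>> q.dequeue()  # FIFO order; front advances modulo n
#   1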
| 299
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 299
|
'''simple docstring'''
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
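# Worked example (an illustrative addition): selection sort repeatedly swaps
# the minimum of the unsorted suffix into position, in O(n^2) comparisons:
#
#   >>> selection_sort([3, 1, 2])
#   [1, 2, 3]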
| 299
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
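# Sketch of intended use (an assumption based on the copy() method above, not
# taken from the source): copies are deep, so mutating one config does not
# affect the other:
#
#   base = DownloadConfig(max_retries=1)
#   aggressive = base.copy()
#   aggressive.max_retries = 5
#   assert base.max_retries == 1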
| 299
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self) -> str:
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root
    def __str__(self) -> str:
        return str(self.root)
    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty(self) -> bool:
        return self.root is None
    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node: Node | None = None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min(self, node: Node | None = None):
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)
    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)
    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)
    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def main() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 299
| 1
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets, probabilities=None, seed=None, info=None, split=None, stopping_strategy="first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(dsets, info=None, split=None, axis=0) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
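# Usage sketch (illustrative, not part of the module):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   joined = concatenate_datasets([d1, d2])  # 6 rows: all of d1, then all of d2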
| 299
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''')
UpperCamelCase__ = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
UpperCamelCase__ = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
UpperCamelCase__ = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
UpperCamelCase__ = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
UpperCamelCase__ = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
UpperCamelCase__ = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
UpperCamelCase__ = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
UpperCamelCase__ = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ = []
UpperCamelCase__ = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
UpperCamelCase__ = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Tuple = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase__ : int = value
elif weight_type == "running_mean":
UpperCAmelCase__ : int = value
elif weight_type == "running_var":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
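# Quick illustration of the matching rules implemented above (the helper is `should_ignore`
# in the unobfuscated source; key names below are hypothetical):
#   should_ignore("text_encoder_prenet.encoder_prenet.0.weight", IGNORE_KEYS_S2T) -> True   (prefix pattern "text_encoder_prenet.*")
#   should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)                  -> True   (infix pattern "encoder.layers.*.norm_k.weight")
#   should_ignore("speech_decoder_postnet.feat_out.weight", IGNORE_KEYS_T2S)      -> False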
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : int = []
if task == "s2t":
UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : List[Any] = MAPPING_S2T
UpperCAmelCase__ : int = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Tuple = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' )
if prefix in name and suffix in name:
UpperCAmelCase__ : List[str] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : Dict = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Union[str, Any] = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
UpperCAmelCase__ : Optional[int] = '''weight'''
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked'''
else:
UpperCAmelCase__ : Union[str, Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Any = int(items[0] )
UpperCAmelCase__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
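# Note (added for clarity): in the fairseq conv-layer layout handled above, type_id 0
# addresses a layer's convolution and type_id 2 its layer/group norm; names with any
# other type_id fall through to `unused_weights`.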
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any:
if config_path is not None:
UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : str = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : str = config.max_text_positions
UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : int = 6_00
UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions
UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
UpperCAmelCase__ : Tuple = 18_76
UpperCAmelCase__ : Optional[Any] = config.max_speech_positions
UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
UpperCAmelCase__ : int = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
UpperCamelCase__ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
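# Example invocation (script name and paths are placeholders, shown for illustration only):
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts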
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'torchsde']
def __init__( self : List[Any] , *_A : List[str] , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : Optional[Any] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Tuple , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
UpperCamelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : int = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Tuple = value
else:
UpperCAmelCase__ : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Dict = fairseq_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Any = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : List[str] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Dict = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ : Tuple = '''weight'''
else:
UpperCAmelCase__ : Optional[Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any:
if config_path is not None:
UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : int = UniSpeechSatConfig()
UpperCAmelCase__ : Tuple = ''''''
if is_finetuned:
UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCAmelCase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
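# Example invocation (script name and paths are placeholders, shown for illustration only):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat \
#       --not_finetuned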
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
UpperCAmelCase__ : Tuple = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
UpperCAmelCase__ : Any = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase__ : Optional[int] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
UpperCAmelCase__ : Optional[Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCAmelCase__ )-1}""" )
if "norm" in key:
UpperCAmelCase__ : Optional[int] = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase__ : str = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
UpperCAmelCase__ : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCAmelCase__ )-1}""" )
if "layer_norm1" in key:
UpperCAmelCase__ : Tuple = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase__ : Tuple = key[key.find('''block''' ) + len('''block''' )]
UpperCAmelCase__ : Tuple = key.replace(F"""block{idx}""" , F"""block.{int(lowerCAmelCase__ )-1}""" )
if "attn.q" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
UpperCAmelCase__ : str = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
UpperCAmelCase__ : int = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
UpperCAmelCase__ : str = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
UpperCAmelCase__ : Tuple = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
UpperCAmelCase__ : Tuple = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
UpperCAmelCase__ : Dict = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase__ : Tuple = key[key.find('''linear_c''' ) + len('''linear_c''' )]
UpperCAmelCase__ : List[Any] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCAmelCase__ )-1}""" )
if "bot_conv" in key:
UpperCAmelCase__ : Tuple = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
UpperCAmelCase__ : int = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
UpperCAmelCase__ : Optional[Any] = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
UpperCAmelCase__ : Dict = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
UpperCAmelCase__ : Tuple = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
UpperCAmelCase__ : Tuple = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
UpperCAmelCase__ : Optional[int] = key.replace('''module.last_layer_depth''' , '''head.head''' )
UpperCAmelCase__ : Any = value
return new_state_dict
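# Example of the renaming performed above on a single (hypothetical) checkpoint key:
#   "module.encoder.patch_embed1.proj.weight"
#     -> "glpn.encoder.patch_embeddings.0.proj.weight"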
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase__ : Optional[Any] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCAmelCase__ : Dict = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase__ : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase__ : int = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase__ : Dict = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase__ : Any = kv_bias[config.hidden_sizes[i] :]
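# Note (added): the fused `kv` matrix stacks the key and value projections along dim 0,
# so the first `config.hidden_sizes[i]` rows become the key weights and the remaining
# rows the value weights; the bias vector is split the same way.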
def a__ ( ) -> List[Any]:
UpperCAmelCase__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase__ : Tuple = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return image
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=None ) -> int:
UpperCAmelCase__ : Any = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase__ : str = GLPNImageProcessor()
# prepare image
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
UpperCAmelCase__ : Tuple = torch.load(lowerCAmelCase__ , map_location=torch.device('''cpu''' ) )
# rename keys
UpperCAmelCase__ : Any = rename_keys(lowerCAmelCase__ )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# create HuggingFace model and load state dict
UpperCAmelCase__ : Tuple = GLPNForDepthEstimation(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# forward pass
UpperCAmelCase__ : str = model(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase__ : Any = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
UpperCAmelCase__ : Dict = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCAmelCase__ : List[Any] = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowerCAmelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
UpperCamelCase__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
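# Example invocation (the checkpoint path is a placeholder):
#   python convert_glpn_checkpoint.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti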
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase__ = random.Random()
if is_torch_available():
import torch
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]:
if rng is None:
UpperCAmelCase__ : List[str] = global_rng
UpperCAmelCase__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
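# Note (added): the helper above (`floats_list` in the unobfuscated source) returns
# shape[0] lists of shape[1] pseudo-random floats in [0, scale), using the module-level
# `global_rng` unless a caller-supplied `rng` is passed for reproducibility.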
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Dict = min_seq_length
UpperCAmelCase__ : str = max_seq_length
UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase__ : Optional[Any] = feature_size
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : int = sampling_rate
UpperCAmelCase__ : Tuple = return_attention_mask
UpperCAmelCase__ : str = do_normalize
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ):
'''simple docstring'''
def _flatten(_A : Union[str, Any] ):
return list(itertools.chain(*_A ) )
if equal_length:
UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = ASTFeatureExtractor
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = ASTFeatureExtractionTester(self )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase__ : Any = np.asarray(_A )
UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values
UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
@require_torch
def lowercase_ ( self : List[str] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa )
UpperCAmelCase__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowercase_ ( self : int , _A : List[Any] ):
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
        # fmt: off
        UpperCAmelCase__ : Any = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 )
UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor()
UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
def __init__( self : str , _A : Tuple , _A : str=2 , _A : Optional[Any]=True , _A : Any=False , _A : Optional[Any]=10 , _A : List[str]=3 , _A : Dict=32 * 4 , _A : int=32 * 6 , _A : int=4 , _A : List[str]=32 , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : str = use_auxiliary_loss
UpperCAmelCase__ : List[str] = num_queries
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : Optional[Any] = min_size
UpperCAmelCase__ : int = max_size
UpperCAmelCase__ : List[str] = num_labels
UpperCAmelCase__ : str = mask_feature_size
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
UpperCAmelCase__ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
UpperCAmelCase__ : Dict = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
).float()
UpperCAmelCase__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
UpperCAmelCase__ : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ : int = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase_ ( self : Optional[int] , _A : int , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = output.encoder_hidden_states
UpperCAmelCase__ : str = output.pixel_decoder_hidden_states
UpperCAmelCase__ : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , config.decoder_config.decoder_layers )
def lowercase_ ( self : List[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Dict , _A : List[Any]=False ):
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase__ : Tuple = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : int = model(pixel_values=_A , pixel_mask=_A )
UpperCAmelCase__ : Union[str, Any] = model(_A , output_hidden_states=_A )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def lowercase_ ( self : str , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(_A : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase__ : Dict = model(pixel_values=_A , pixel_mask=_A )
UpperCAmelCase__ : Tuple = model(_A )
comm_check_on_output(_A )
UpperCAmelCase__ : Union[str, Any] = model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = MaskFormerModelTester(self )
UpperCAmelCase__ : str = ConfigTester(self , config_class=_A , has_text_modality=_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase__ : Any = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = (self.model_tester.min_size,) * 2
UpperCAmelCase__ : List[str] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_A ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_A ),
'''class_labels''': torch.zeros(2 , 10 , device=_A ).long(),
}
UpperCAmelCase__ : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
UpperCAmelCase__ : Tuple = model(**_A )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A ).to(_A )
UpperCAmelCase__ : Dict = model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase__ : Any = self.all_model_classes[1]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = model_class(_A )
model.to(_A )
model.train()
UpperCAmelCase__ : Any = model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.all_model_classes[1]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = model_class(_A )
model.to(_A )
model.train()
UpperCAmelCase__ : Any = model(_A , mask_labels=_A , class_labels=_A )
UpperCAmelCase__ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase__ : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCAmelCase__ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase__ : Any = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase__ = 1e-4
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_A )
UpperCAmelCase__ : List[str] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(_A , return_tensors='''pt''' ).to(_A )
UpperCAmelCase__ : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(**_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_A )
.eval()
)
UpperCAmelCase__ : str = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Dict = image_processor(_A , return_tensors='''pt''' ).to(_A )
UpperCAmelCase__ : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**_A )
# masks_queries_logits
UpperCAmelCase__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase__ : Union[str, Any] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
UpperCAmelCase__ : int = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
UpperCAmelCase__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase__ : int = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_A )
.eval()
)
UpperCAmelCase__ : List[Any] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(_A , return_tensors='''pt''' ).to(_A )
UpperCAmelCase__ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**_A )
# masks_queries_logits
UpperCAmelCase__ : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
UpperCAmelCase__ : List[str] = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
UpperCAmelCase__ : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_A )
.eval()
)
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : int = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
UpperCAmelCase__ : Union[str, Any] = inputs['''pixel_values'''].to(_A )
UpperCAmelCase__ : Any = [el.to(_A ) for el in inputs['''mask_labels''']]
UpperCAmelCase__ : Any = [el.to(_A ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase__ : int = model(**_A )
self.assertTrue(outputs.loss is not None )
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 0
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
with pytest.raises(_A ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values()
UpperCAmelCase__ : Any = []
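# collect the class names of every registered slow/fast tokenizer pair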
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A )
@require_tokenizers
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A )
UpperCAmelCase__ : Any = '''Hello, world. How are you?'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A )
# If we ever update the bert-base-cased tokenizer config, this dict will need to be updated.
self.assertEqual(_A , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase__ : Tuple = get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : Any ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = False
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use the local classes
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCamelCase__ = '''pytorch_model.bin'''
UpperCamelCase__ = '''pytorch_model.bin.index.json'''
UpperCamelCase__ = '''adapter_config.json'''
UpperCamelCase__ = '''adapter_model.bin'''
UpperCamelCase__ = '''adapter_model.safetensors'''
UpperCamelCase__ = '''tf_model.h5'''
UpperCamelCase__ = '''tf_model.h5.index.json'''
UpperCamelCase__ = '''model.ckpt'''
UpperCamelCase__ = '''flax_model.msgpack'''
UpperCamelCase__ = '''flax_model.msgpack.index.json'''
UpperCamelCase__ = '''model.safetensors'''
UpperCamelCase__ = '''model.safetensors.index.json'''
UpperCamelCase__ = '''config.json'''
UpperCamelCase__ = '''preprocessor_config.json'''
UpperCamelCase__ = FEATURE_EXTRACTOR_NAME
UpperCamelCase__ = '''generation_config.json'''
UpperCamelCase__ = '''modelcard.json'''
UpperCamelCase__ = '''▁'''
UpperCamelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCamelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCamelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCamelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def a__ ( lowerCAmelCase__ ) -> Tuple:
if version.parse(lowerCAmelCase__ ) < version.parse(lowerCAmelCase__ ):
if "dev" in min_version:
UpperCAmelCase__ : Union[str, Any] = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
UpperCAmelCase__ : int = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' )
| 299
|
'''simple docstring'''
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> float:
UpperCAmelCase__ : Tuple = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
UpperCAmelCase__ : List[str] = 1 - (matter_density + radiation_density + dark_energy)
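# this is E(z)^2 from the Friedmann equation: radiation scales as (1+z)^4, matter as (1+z)^3,
# curvature as (1+z)^2, and dark energy stays constant; curvature closes the densities to one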
UpperCAmelCase__ : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
UpperCAmelCase__ : Any = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
UpperCamelCase__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
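# at z = 0 the four densities sum to one, so E(0) = 1 and this prints the Hubble constant itself, 68.3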
| 299
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
def __init__( self : Optional[int] , *_A : Optional[int] , **_A : Optional[Any] ):
'''simple docstring'''
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _A , )
super().__init__(*_A , **_A )
| 299
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : str = (32, 32)
UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : List[str] = (32, 32)
UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return (4, 32, 32)
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (4, 32, 32)
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
UpperCAmelCase__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
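# reload through the standard path and check the two loading modes produce matching samples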
UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1e-3 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase__ : str = noise.to(_A )
UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = UNetaDModel
lowerCAmelCase__ = 'sample'
@property
def lowercase_ ( self : Any , _A : str=(32, 32) ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return (3, 32, 32)
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return (3, 32, 32)
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
UpperCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
UpperCAmelCase__ : List[str] = self.dummy_input
UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
UpperCAmelCase__ : Optional[Any] = noise
UpperCAmelCase__ : Any = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
UpperCAmelCase__ : Optional[Any] = 4
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : Dict = (256, 256)
UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : int = (32, 32)
UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
with torch.no_grad():
UpperCAmelCase__ : int = model(_A , _A ).sample
UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 299
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 299
|
'''simple docstring'''
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[float, list[float]]:
UpperCAmelCase__ : Optional[Any] = list(range(len(lowerCAmelCase__ ) ) )
UpperCAmelCase__ : Optional[Any] = [v / w for v, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
index.sort(key=lambda lowerCAmelCase__ : ratio[i] , reverse=lowerCAmelCase__ )
UpperCAmelCase__ : float = 0
UpperCAmelCase__ : list[float] = [0] * len(lowerCAmelCase__ )
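# greedy sweep in decreasing value-per-weight order: take whole items while they fit,
# then a fraction of the first item that does not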
for i in index:
if weight[i] <= capacity:
UpperCAmelCase__ : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCAmelCase__ : Tuple = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
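# worked example with hypothetical items: values [60, 100, 120], weights [10, 20, 30],
# capacity 50 -> the greedy takes the two best-ratio items in full plus two thirds of
# the last one, giving (240.0, [1, 1, 2/3])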
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 299
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_A : Any , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 299
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = KandinskyVaaControlnetImgaImgPipeline
lowerCAmelCase__ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase__ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase__ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase__ = False
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return 100
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : str = {
'''in_channels''': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : List[Any] = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.dummy_unet
UpperCAmelCase__ : Optional[Any] = self.dummy_movq
UpperCAmelCase__ : List[str] = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase__ : Union[str, Any] = DDIMScheduler(**_A )
UpperCAmelCase__ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : str , _A : List[str] , _A : Dict=0 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
UpperCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Tuple = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase__ : int = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Tuple = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = '''cpu'''
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : int = self.pipeline_class(**_A )
UpperCAmelCase__ : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase__ : Dict = output.images
UpperCAmelCase__ : int = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : str = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase__ : Optional[Any] = init_image.resize((512, 512) )
UpperCAmelCase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
UpperCAmelCase__ : str = torch.from_numpy(np.array(_A ) ).float() / 2_5_5.0
UpperCAmelCase__ : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase__ : List[str] = '''A robot, 4k photo'''
UpperCAmelCase__ : Tuple = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase__ : Optional[int] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
UpperCAmelCase__ : Any = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = pipe_prior(
_A , image=_A , strength=0.8_5 , generator=_A , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase__ : str = pipeline(
image=_A , image_embeds=_A , negative_image_embeds=_A , hint=_A , generator=_A , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A , _A )
| 299
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 299
| 1
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCamelCase_ ( unittest.TestCase , __a ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = load_tool('''text-to-speech''' )
self.tool.setup()
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = self.tool('''hey''' )
UpperCAmelCase__ : Dict = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def lowercase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = self.tool('''hey''' )
UpperCAmelCase__ : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 299
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCamelCase_ ( __a ):
def __get__( self : str , _A : Tuple , _A : List[str]=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
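# memoise under a per-property attribute so fget only runs on the first access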
UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__
UpperCAmelCase__ : Any = getattr(_A , _A , _A )
if cached is None:
UpperCAmelCase__ : Dict = self.fget(_A )
setattr(_A , _A , _A )
return cached
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
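# e.g. "YES" -> 1, "off" -> 0; any other string raises ValueError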
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
if is_torch_fx_proxy(lowerCAmelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase__ , np.ndarray )
def a__ ( lowerCAmelCase__ ) -> Any:
return isinstance(lowerCAmelCase__ , np.ndarray )
def a__ ( lowerCAmelCase__ ) -> int:
return _is_numpy(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
import torch
return isinstance(lowerCAmelCase__ , torch.Tensor )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
import torch
return isinstance(lowerCAmelCase__ , torch.device )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Any:
import torch
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
else:
return False
return isinstance(lowerCAmelCase__ , torch.dtype )
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> List[Any]:
import tensorflow as tf
return isinstance(lowerCAmelCase__ , tf.Tensor )
def a__ ( lowerCAmelCase__ ) -> List[str]:
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Any:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(lowerCAmelCase__ )
return type(lowerCAmelCase__ ) == tf.Tensor
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase__ , jnp.ndarray )
def a__ ( lowerCAmelCase__ ) -> List[Any]:
return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return [to_py_obj(lowerCAmelCase__ ) for o in obj]
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ ).tolist()
elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
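# e.g. a dict of framework tensors becomes plain Python containers: {"ids": tensor([1, 2])} -> {"ids": [1, 2]}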
def a__ ( lowerCAmelCase__ ) -> Tuple:
if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return np.array(lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ )
else:
return obj
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name )
UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] )
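# when only the first field is set and it is not a tensor, it may itself carry the outputs
# (either a mapping or an iterator of (key, value) pairs)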
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A , _A ):
UpperCAmelCase__ : List[Any] = first_field.items()
UpperCAmelCase__ : Optional[int] = True
else:
try:
UpperCAmelCase__ : Optional[int] = iter(_A )
UpperCAmelCase__ : Optional[int] = True
except TypeError:
UpperCAmelCase__ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A , (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0] , _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase__ : List[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase__ : List[str] = element[1]
elif first_field is not None:
UpperCAmelCase__ : Optional[Any] = first_field
else:
for field in class_fields:
UpperCAmelCase__ : Optional[int] = getattr(self , field.name )
if v is not None:
UpperCAmelCase__ : str = v
def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ):
'''simple docstring'''
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ):
'''simple docstring'''
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : List[str] , _A : Any ):
'''simple docstring'''
if isinstance(_A , _A ):
UpperCAmelCase__ : Union[str, Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : int , _A : Union[str, Any] , _A : str ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A , _A )
super().__setattr__(_A , _A )
def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ):
'''simple docstring'''
super().__setitem__(_A , _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
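# Hedged usage sketch of the output class above: ModelOutput-style objects
# allow attribute access, string-key access, and integer indexing over their
# non-None fields. Every name below is an illustrative stand-in.
from collections import OrderedDict as _OrderedDict
from dataclasses import dataclass as _dataclass, fields as _fields
from typing import Optional as _Optional

class _MiniOutput(_OrderedDict):
    def __post_init__(self):
        for f in _fields(self):  # register non-None dataclass fields as ordered entries
            v = getattr(self, f.name)
            if v is not None:
                self[f.name] = v
    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
    def __getitem__(self, k):
        return dict(self.items())[k] if isinstance(k, str) else self.to_tuple()[k]

@_dataclass
class _SampleOutput(_MiniOutput):
    loss: _Optional[float] = None
    logits: _Optional[list] = None

_out = _SampleOutput(loss=0.5, logits=[1, 2])
assert _out.loss == _out["loss"] == _out[0] == 0.5
assert _out.to_tuple() == (0.5, [1, 2])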
class lowerCamelCase_ ( __a , __a ):
@classmethod
def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ):
'''simple docstring'''
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'longest'
lowerCAmelCase__ = 'max_length'
lowerCAmelCase__ = 'do_not_pad'
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'pt'
lowerCAmelCase__ = 'tf'
lowerCAmelCase__ = 'np'
lowerCAmelCase__ = 'jax'
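# Hedged sketch of the ExplicitEnum pattern used above: overriding `_missing_`
# turns a bad value lookup into a readable ValueError (names are illustrative):
from enum import Enum as _Enum

class _ExplicitEnumSketch(str, _Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of "
            f"{list(cls._value2member_map_.keys())}"
        )

class _PaddingSketch(_ExplicitEnumSketch):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"

assert _PaddingSketch("longest") is _PaddingSketch.LONGEST
# _PaddingSketch("bogus") raises ValueError listing the valid values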
class lowerCamelCase_ :
def __init__( self : List[Any] , _A : List[ContextManager] ):
'''simple docstring'''
UpperCAmelCase__ : str = context_managers
UpperCAmelCase__ : int = ExitStack()
def __enter__( self : str ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__( self : Dict , *_A : List[Any] , **_A : str ):
'''simple docstring'''
self.stack.__exit__(*_A , **_A )
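# Self-contained demo of what the ExitStack wrapper above does: enter a list
# of context managers as a single `with` block (the `_tag` helper is made up):
from contextlib import ExitStack as _ExitStack, contextmanager as _contextmanager

@_contextmanager
def _tag(name):
    print(f"enter {name}")
    try:
        yield
    finally:
        print(f"exit {name}")

with _ExitStack() as _stack:
    for _cm in [_tag("a"), _tag("b")]:
        _stack.enter_context(_cm)
    print("inside")
# prints: enter a, enter b, inside, exit b, exit a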
def a__ ( lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ )
if framework == "tf":
UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : Dict = model_class.__name__
UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ )
if framework == "tf":
UpperCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any:
def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
for k, v in d.items():
UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
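# Quick check of the flattening helper above, restated standalone because the
# in-file function name is obfuscated (`_flatten_sketch` is illustrative):
def _flatten_sketch(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if v and isinstance(v, dict):
            out.update(_flatten_sketch(v, key, delimiter))
        else:
            out[key] = v
    return out

assert _flatten_sketch({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) == {"a": 1, "b.c": 2, "b.d.e": 3}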
@contextmanager
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
if is_numpy_array(lowerCAmelCase__ ):
return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.reshape(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.unsqueeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def a__ ( lowerCAmelCase__ ) -> int:
if is_numpy_array(lowerCAmelCase__ ):
return np.size(lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.size(lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )
def a__ ( auto_map , repo_id ) -> List[str]:
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
return auto_map
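# Worked example of the auto_map rewriting above, restated standalone (the
# function and data names are illustrative): bare class paths get the repo id
# prefix, while entries that already contain "--" are left untouched.
def _add_repo_prefix_sketch(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map

assert _add_repo_prefix_sketch(
    {"AutoModel": "modeling.MyModel", "AutoConfig": "other-repo--conf.MyConfig"}, "user/my-repo"
) == {"AutoModel": "user/my-repo--modeling.MyModel", "AutoConfig": "other-repo--conf.MyConfig"}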
def a__ ( lowerCAmelCase__ ) -> Tuple:
for base_class in inspect.getmro(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = base_class.__module__
UpperCAmelCase__ : Optional[int] = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
        raise TypeError(F"""Could not infer framework from class {lowerCAmelCase__}.""" )
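# Minimal illustration of the MRO walk performed above, using a fake module
# path instead of importing a real framework (purely illustrative):
import inspect

class _FakeTorchBase:
    pass
_FakeTorchBase.__module__ = "torch.nn.modules.module"

class _FakeModel(_FakeTorchBase):
    pass

_detected = None
for _base in inspect.getmro(_FakeModel):
    if _base.__module__.startswith("torch"):
        _detected = "pt"
        break
assert _detected == "pt"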
| 299
| 1
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = ByTaTokenizer
lowerCAmelCase__ = False
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase__ : Optional[Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def lowercase_ ( self : Tuple , **_A : Optional[int] ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def lowercase_ ( self : Any , tokenizer : List[Any] , with_prefix_space : Dict=False , max_length : Dict=20 , min_length : Optional[Any]=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.ta_base_tokenizer
UpperCAmelCase__ : List[str] = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
UpperCAmelCase__ : Dict = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.ta_base_tokenizer
UpperCAmelCase__ : Dict = '''Unicode €.'''
UpperCAmelCase__ : Optional[int] = tokenizer(_A )
UpperCAmelCase__ : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
UpperCAmelCase__ : str = tokenizer.decode(_A )
self.assertEqual(_A , '''Unicode €.</s>''' )
UpperCAmelCase__ : int = tokenizer('''e è é ê ë''' )
UpperCAmelCase__ : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
UpperCAmelCase__ : Optional[Any] = tokenizer.decode(_A )
self.assertEqual(_A , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
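    # Note on the expected ids above: ByT5 ids are just UTF-8 bytes shifted by
    # the 3 leading special tokens (pad=0, eos=1, unk=2). For example,
    #     [b + 3 for b in '''Unicode €.'''.encode('''utf-8''' )] + [1]
    # reproduces [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1].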
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.ta_base_tokenizer
UpperCAmelCase__ : List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCAmelCase__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
UpperCAmelCase__ : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase__ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.ta_base_tokenizer
UpperCAmelCase__ : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase__ : Tuple = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.ta_base_tokenizer
UpperCAmelCase__ : Dict = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase__ : str = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.ta_base_tokenizer
UpperCAmelCase__ : int = ['''A long paragraph for summarization. </s>''']
UpperCAmelCase__ : Any = ['''Summary of the text. </s>''']
# fmt: off
UpperCAmelCase__ : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCAmelCase__ : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCAmelCase__ : int = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
UpperCAmelCase__ : int = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(_A )
UpperCAmelCase__ : List[str] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
UpperCAmelCase__ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase__ : str = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCAmelCase__ : str = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCAmelCase__ : Any = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(_A )
UpperCAmelCase__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase__ : Any = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase__ : str = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase__ : Optional[Any] = json.load(_A )
UpperCAmelCase__ : Union[str, Any] = [f"""<extra_id_{i}>""" for i in range(125 )]
UpperCAmelCase__ : str = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase__ : List[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase__ : Any = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase__ : Any = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
UpperCAmelCase__ : Tuple = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase_ ( self : int ):
'''simple docstring'''
pass
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ : Optional[int] = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
UpperCAmelCase__ : str = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase__ : int = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False )
                for attr in attributes_list:
                    setattr(tokenizer , attr + '''_id''' , None )
                    self.assertEqual(getattr(tokenizer , attr ) , None )
                    self.assertEqual(getattr(tokenizer , attr + '''_id''' ) , None )
                    setattr(tokenizer , attr + '''_id''' , token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr + '''_id''' ) , token_id_to_test_setters )
                setattr(tokenizer , '''additional_special_tokens_ids''' , [] )
                self.assertListEqual(getattr(tokenizer , '''additional_special_tokens''' ) , [] )
                self.assertListEqual(getattr(tokenizer , '''additional_special_tokens_ids''' ) , [] )
                setattr(tokenizer , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , '''additional_special_tokens''' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 299
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict:
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase__ : str = DatasetDict(
{
'''train''': dataset['''train'''].select(lowerCAmelCase__ ),
'''validation''': dataset['''train'''].select(lowerCAmelCase__ ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ : Dict = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ : Any = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ : Dict = 8
else:
UpperCAmelCase__ : List[Any] = None
return tokenizer.pad(
lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = DataLoader(
tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader, test_dataloader
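# Standalone illustration of the stratified split that feeds the dataloader
# builder above (labels below are made up; sklearn assumed installed):
import numpy as np
from sklearn.model_selection import StratifiedKFold

_labels = np.array([0, 0, 0, 1, 1, 1])
for _fold, (_train_idx, _valid_idx) in enumerate(StratifiedKFold(n_splits=3).split(np.zeros(len(_labels)), _labels)):
    # every validation split keeps one example of each class
    print(_fold, _train_idx, _valid_idx)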
def a__ ( config , args ) -> str:
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('''glue''' , '''mrpc''' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
    test_references = []
# Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
UpperCAmelCase__ : Any = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
        for epoch in range(num_epochs ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Dict = outputs.loss
UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
UpperCAmelCase__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
# New Code #
# We also run predictions on the test set at the very end
        fold_predictions = []
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
UpperCAmelCase__ : Union[str, Any] = outputs.logits
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print('''Average test metrics from all folds:''' , test_metric )
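# Toy version of the fold ensembling done just above: per-fold test logits are
# stacked, averaged across folds, then argmaxed into final predictions:
_fold_logits = [torch.tensor([[2.0, 0.0], [0.0, 1.0]]), torch.tensor([[0.0, 1.0], [0.0, 3.0]])]
_avg = torch.stack(_fold_logits, dim=0).sum(dim=0).div(len(_fold_logits))
print(_avg.argmax(dim=-1))  # tensor([0, 1])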
def a__ ( ) -> Any:
UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' )
UpperCAmelCase__ : Tuple = parser.parse_args()
UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 299
| 1
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class lowerCamelCase_ :
lowerCAmelCase__ = 2_0_0
lowerCAmelCase__ = {'Content-Length': '100'}
lowerCAmelCase__ = {}
def lowercase_ ( self : List[str] , **_A : Optional[int] ):
'''simple docstring'''
        return [bytes(CONTENT , '''utf-8''' )]
def a__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
import requests
monkeypatch.setattr(lowerCAmelCase__ , '''request''' , lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = URL
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : int = url
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[Any] = [url]
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : int = {'''train''': url}
UpperCAmelCase__ : Tuple = '''dummy'''
UpperCAmelCase__ : Optional[Any] = '''downloads'''
UpperCAmelCase__ : Dict = tmp_path
UpperCAmelCase__ : List[str] = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , use_etag=lowerCAmelCase__ , )
UpperCAmelCase__ : Optional[Any] = DownloadManager(dataset_name=lowerCAmelCase__ , download_config=lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = dl_manager.download(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : int = [downloaded_paths]
UpperCAmelCase__ : str = [urls]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
assert "train" in downloaded_paths.keys()
UpperCAmelCase__ : Optional[Any] = downloaded_paths.values()
UpperCAmelCase__ : Tuple = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCAmelCase__ : Any = Path(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCAmelCase__ : List[Any] = downloaded_path.read_text()
assert content == CONTENT
UpperCAmelCase__ : Optional[int] = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
UpperCAmelCase__ : int = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : str = str(lowerCAmelCase__ )
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : int = filename
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = [filename]
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = {'''train''': filename}
UpperCAmelCase__ : Dict = '''dummy'''
UpperCAmelCase__ : str = xz_file.parent
UpperCAmelCase__ : Tuple = '''extracted'''
UpperCAmelCase__ : Dict = DownloadConfig(
cache_dir=lowerCAmelCase__ , use_etag=lowerCAmelCase__ , )
UpperCAmelCase__ : Optional[Any] = DownloadManager(dataset_name=lowerCAmelCase__ , download_config=lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = dl_manager.extract(lowerCAmelCase__ )
UpperCAmelCase__ : Any = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : List[str] = [extracted_paths]
UpperCAmelCase__ : str = [paths]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
assert "train" in extracted_paths.keys()
UpperCAmelCase__ : List[str] = extracted_paths.values()
UpperCAmelCase__ : List[str] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCAmelCase__ : Dict = Path(lowerCAmelCase__ )
UpperCAmelCase__ : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase__ , etag=lowerCAmelCase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCAmelCase__ : Optional[int] = extracted_path.read_text()
UpperCAmelCase__ : Any = text_file.read_text()
assert extracted_file_content == expected_file_content
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(lowerCAmelCase__ , start=1 ):
UpperCAmelCase__ : List[Any] = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : str = request.getfixturevalue(lowerCAmelCase__ )
UpperCAmelCase__ : int = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
_test_jsonl(lowerCAmelCase__ , lowerCAmelCase__ )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : str = request.getfixturevalue(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
_test_jsonl(lowerCAmelCase__ , lowerCAmelCase__ )
assert num_tar == 1
assert num_jsonl == 2
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase__ ) , start=1 ):
assert os.path.basename(lowerCAmelCase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
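# The cache filename asserted via HASH above comes from hash_url_to_filename,
# which, when no etag is given, is simply the sha256 hex digest of the URL:
from hashlib import sha256

print(sha256(URL.encode("utf-8")).hexdigest())
# expected to reproduce the HASH constant defined at the top of this file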
| 299
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : Tuple = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase__ : Optional[int] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
# load decoder from hub
UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder'''
def lowercase_ ( self : int , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : str , **_A : Any ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : str , **_A : Any ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) )
UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : str = self.get_decoder()
UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Optional[int] = processor(text=_A )
UpperCAmelCase__ : List[str] = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowercase_ ( self : Dict , shape : Optional[int]=(2, 10, 16) , seed : List[str]=77 ):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase__ : List[Any] = processor.decode(_A )
UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def lowercase_ ( self : Any , pool_context : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
else:
            with get_context(pool_context ).Pool() as pool:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A )
UpperCAmelCase__ : str = list(_A )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : Optional[int] = 15
UpperCAmelCase__ : Dict = -2_0.0
UpperCAmelCase__ : Optional[Any] = -4.0
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor()
UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[int] = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : str = -2_0.0
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : List[str] = os.listdir(_A )
UpperCAmelCase__ : Any = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()
UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : int = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def lowercase_ ( _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : int = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : str = self._get_dummy_logits()[0]
UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Dict = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : List[Any] = iter(_A )
UpperCAmelCase__ : Optional[Any] = next(_A )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
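# A minimal standalone sketch (not part of the test above) of how CTC word
# offsets become timestamps: each offset counts logit frames, so multiplying by
# samples-per-frame over the sampling rate yields seconds. The ratio and
# offsets below are illustrative assumptions, not values from the test.
def offsets_to_timestamps(word_offsets, inputs_to_logits_ratio, sampling_rate):
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]
# e.g. with a Wav2Vec2-style ratio of 320 samples per frame at 16 kHz (0.02 s/frame):
print(offsets_to_timestamps([{"word": "hello", "start_offset": 10, "end_offset": 25}], 320, 16_000))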
| 299
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : int , _A : int , _A : List[Any]=3 , _A : List[str]=32 , _A : Tuple=3 , _A : str=10 , _A : Union[str, Any]=[10, 20, 30, 40] , _A : Optional[Any]=[1, 1, 2, 1] , _A : Any=True , _A : Any=True , _A : Optional[Any]="relu" , _A : Dict=3 , _A : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : List[Any] = image_size
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : Dict = embeddings_size
UpperCAmelCase__ : Any = hidden_sizes
UpperCAmelCase__ : Any = depths
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Dict = scope
UpperCAmelCase__ : str = len(_A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = self.get_config()
return config, pixel_values
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase_ ( self : Optional[Any] , _A : Dict , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = FlaxRegNetModel(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(_A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase_ ( self : List[Any] , _A : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Dict = FlaxRegNetForImageClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = config_and_inputs
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxRegNetModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : Dict ):
'''simple docstring'''
return
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_A )
UpperCAmelCase__ : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(_A : Tuple , _A : List[str] , _A : Any ):
UpperCAmelCase__ : List[str] = model_class(_A )
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : str = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[Any] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : Dict = model_class(_A )
@jax.jit
def model_jitted(_A : str , **_A : Tuple ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : Any = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[Any] = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def a__ ( ) -> Any:
UpperCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase__ : Optional[int] = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : str = image_processor(images=_A , return_tensors='''np''' )
UpperCAmelCase__ : str = model(**_A )
# verify the logits
UpperCAmelCase__ : Any = (1, 1_000)
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
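# A self-contained sketch of the JIT-equivalence pattern exercised above:
# compile a function with jax.jit and check that jitted and eager calls agree
# in shape and value. The function below is a toy stand-in for the model call.
import jax
import jax.numpy as jnp

def forward(pixel_values):
    return jnp.tanh(pixel_values * 2.0 + 1.0)  # toy "model"

jitted_forward = jax.jit(forward)
x = jnp.ones((1, 3, 4, 4))
with jax.disable_jit():
    eager_out = forward(x)
jit_out = jitted_forward(x)
assert eager_out.shape == jit_out.shape
assert jnp.allclose(eager_out, jit_out)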
| 299
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a__ ( lowerCAmelCase__ ) -> List[Any]:
return 1 / (1 + np.exp(-z ))
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
return (-y * np.log(lowerCAmelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
UpperCAmelCase__ : str = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase__ ) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=7_00_00 ) -> List[Any]:
UpperCAmelCase__ : Tuple = np.zeros(x.shape[1] )
for iterations in range(lowerCAmelCase__ ):
UpperCAmelCase__ : List[Any] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = sigmoid_function(lowerCAmelCase__ )
UpperCAmelCase__ : int = np.dot(x.T , h - y ) / y.size
UpperCAmelCase__ : Optional[int] = theta - alpha * gradient # updating the weights
UpperCAmelCase__ : Dict = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : int = sigmoid_function(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = cost_function(lowerCAmelCase__ , lowerCAmelCase__ )
if iterations % 1_00 == 0:
print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCamelCase__ = datasets.load_iris()
UpperCamelCase__ = iris.data[:, :2]
UpperCamelCase__ = (iris.target != 0) * 1
UpperCamelCase__ = 0.1
UpperCamelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def a__ ( lowerCAmelCase__ ) -> Dict:
return sigmoid_function(
np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase__) , (UpperCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
UpperCamelCase__ = np.c_[xxa.ravel(), xxb.ravel()]
UpperCamelCase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
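# A de-obfuscated sketch of the gradient-descent loop above (same mathematics,
# clearer names); assumes a design matrix X of shape (n, d) and binary labels y
# of shape (n,). Learning rate and iteration count are illustrative.
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def fit_logistic(X, y, lr=0.1, iterations=1_000):
    theta = np.zeros(X.shape[1])
    for _ in range(iterations):
        h = sigmoid(X @ theta)
        gradient = X.T @ (h - y) / y.size  # gradient of the mean log loss
        theta -= lr * gradient
    return theta
# e.g. theta = fit_logistic(np.random.rand(100, 2), np.random.randint(0, 2, 100))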
| 299
| 1
|
'''simple docstring'''
from typing import Any
class lowerCamelCase_ :
def __init__( self : List[str] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = data
UpperCAmelCase__ : List[Any] = None
class lowerCamelCase_ :
def __init__( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = None
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.head
while temp is not None:
print(temp.data , end=''' ''' )
UpperCAmelCase__ : int = temp.next
print()
def lowercase_ ( self : int , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Node(_A )
UpperCAmelCase__ : int = self.head
UpperCAmelCase__ : List[Any] = new_node
def lowercase_ ( self : List[str] , _A : List[str] , _A : Union[str, Any] ):
'''simple docstring'''
if node_data_a == node_data_b:
return
else:
UpperCAmelCase__ : Any = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase__ : Optional[int] = node_a.next
UpperCAmelCase__ : Tuple = self.head
while node_b is not None and node_b.data != node_data_b:
UpperCAmelCase__ : int = node_b.next
if node_a is None or node_b is None:
return
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = node_b.data, node_a.data
if __name__ == "__main__":
UpperCamelCase__ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
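# A clean sketch of the swap performed above: locate both nodes by value with
# independent cursors, then exchange only their payloads so no links change.
def swap_nodes_by_data(head, data_a, data_b):
    if data_a == data_b:
        return
    node_a, node_b = head, head
    while node_a is not None and node_a.data != data_a:
        node_a = node_a.next
    while node_b is not None and node_b.data != data_b:
        node_b = node_b.next
    if node_a is None or node_b is None:
        return  # at least one value is absent; nothing to swap
    node_a.data, node_b.data = node_b.data, node_a.data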
| 299
|
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'new-model'
if is_tf_available():
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewModelConfig
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''bert-base-cased'''
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = '''bert-base-cased'''
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
@require_tensorflow_probability
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = copy.deepcopy(model.config )
UpperCAmelCase__ : Tuple = ['''FunnelBaseModel''']
UpperCAmelCase__ : int = TFAutoModel.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register('''new-model''' , _A )
UpperCAmelCase__ : List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
auto_class.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config()
UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase__ : str = auto_class.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = auto_class.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowercase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
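# A minimal sketch of the register/cleanup pattern the registration test above
# exercises; the class name and model type here are made up for illustration.
from transformers import CONFIG_MAPPING, AutoConfig, PretrainedConfig

class MyToyConfig(PretrainedConfig):
    model_type = "my-toy-model"

AutoConfig.register("my-toy-model", MyToyConfig)
try:
    config = AutoConfig.for_model("my-toy-model")
    assert isinstance(config, MyToyConfig)
finally:
    # mirror the test's cleanup so the global mapping is left untouched
    if "my-toy-model" in CONFIG_MAPPING._extra_content:
        del CONFIG_MAPPING._extra_content["my-toy-model"]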
| 299
| 1
|
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : Optional[int] = [1]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 0, 0
UpperCAmelCase__ : int = ugly_nums[ia] * 2
UpperCAmelCase__ : Dict = ugly_nums[ib] * 3
UpperCAmelCase__ : Tuple = ugly_nums[ic] * 5
for _ in range(1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
ugly_nums.append(lowerCAmelCase__ )
if next_num == next_a:
ia += 1
UpperCAmelCase__ : Dict = ugly_nums[ia] * 2
if next_num == next_b:
ib += 1
UpperCAmelCase__ : Any = ugly_nums[ib] * 3
if next_num == next_c:
ic += 1
UpperCAmelCase__ : int = ugly_nums[ic] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(2_0_0) = }""")
| 299
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : List[Any] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : Tuple = do_resize
UpperCAmelCase__ : Union[str, Any] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : Union[str, Any] = image_mean
UpperCAmelCase__ : Optional[int] = image_std
UpperCAmelCase__ : Dict = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : int = do_pad
def lowercase_ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase__ : Optional[int] = image_inputs[0]
if isinstance(_A , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : str = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase__ : int = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase__ : List[str] = self.size['''shortest_edge''']
UpperCAmelCase__ : Dict = self.size['''shortest_edge''']
else:
UpperCAmelCase__ : int = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0]
UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A )
UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : str = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target}
# encode them
UpperCAmelCase__ : Optional[int] = DetaImageProcessor()
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCAmelCase__ : int = json.loads(f.read() )
UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' )
UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
UpperCAmelCase__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
UpperCAmelCase__ : Dict = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
UpperCAmelCase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
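# A standalone sketch of the shortest-edge resize rule that get_expected_values
# above reproduces: scale so the shorter side hits `shortest_edge` while
# preserving aspect ratio (the longest_edge cap is omitted here for brevity).
def expected_resize(height: int, width: int, shortest_edge: int = 18) -> tuple[int, int]:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert expected_resize(400, 200) == (36, 18)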
| 299
| 1
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
UpperCAmelCase__ : List[Any] = [randint(-10_00 , 10_00 ) for i in range(10 )]
UpperCAmelCase__ : Any = randint(-50_00 , 50_00 )
return (arr, r)
UpperCamelCase__ = make_dataset()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[int, ...]:
for triplet in permutations(lowerCAmelCase__ , 3 ):
if sum(lowerCAmelCase__ ) == target:
return tuple(sorted(lowerCAmelCase__ ) )
return (0, 0, 0)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[int, int, int]:
arr.sort()
UpperCAmelCase__ : Any = len(lowerCAmelCase__ )
for i in range(n - 1 ):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
UpperCAmelCase__ : List[Any] = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
UpperCAmelCase__ : Optional[Any] = '''
triplet_sum1(*dataset)
'''
UpperCAmelCase__ : str = '''
triplet_sum2(*dataset)
'''
UpperCAmelCase__ : Any = repeat(setup=lowerCAmelCase__ , stmt=lowerCAmelCase__ , repeat=5 , number=1_00_00 )
UpperCAmelCase__ : Optional[Any] = repeat(setup=lowerCAmelCase__ , stmt=lowerCAmelCase__ , repeat=5 , number=1_00_00 )
return (min(lowerCAmelCase__ ), min(lowerCAmelCase__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase__ = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( lowerCAmelCase__ ) -> None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = analyze_text(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = list(''' ''' + ascii_lowercase )
# total number of single-character occurrences (normaliser for the probabilities).
UpperCAmelCase__ : str = sum(single_char_strings.values() )
# entropy accumulator for length-one strings
UpperCAmelCase__ : int = 0
# for each letter of the alphabet, accumulate its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
UpperCAmelCase__ : Optional[int] = single_char_strings[ch]
UpperCAmelCase__ : int = my_str / all_sum
my_fir_sum += prob * math.loga(lowerCAmelCase__ ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# total number of two-character sequences
UpperCAmelCase__ : str = sum(two_char_strings.values() )
UpperCAmelCase__ : Optional[Any] = 0
# for each two-character sequence, accumulate its entropy contribution.
for cha in my_alphas:
for chb in my_alphas:
UpperCAmelCase__ : Optional[int] = cha + chb
if sequence in two_char_strings:
UpperCAmelCase__ : Dict = two_char_strings[sequence]
UpperCAmelCase__ : Optional[int] = int(lowerCAmelCase__ ) / all_sum
my_sec_sum += prob * math.loga(lowerCAmelCase__ )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def a__ ( lowerCAmelCase__ ) -> tuple[dict, dict]:
UpperCAmelCase__ : Union[str, Any] = Counter() # type: ignore
UpperCAmelCase__ : Tuple = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowerCAmelCase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a__ ( ) -> Tuple:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
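# A condensed sketch of the first-order entropy computed above: count unigram
# frequencies and sum -p * log2(p). (The function above additionally reports
# bigram entropy and the difference between the two.)
import math
from collections import Counter

def unigram_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(f"{unigram_entropy('abracadabra'):.3f} bits per symbol")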
| 299
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'trocr'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Union[str, Any] , _A : Dict=50_265 , _A : Any=1_024 , _A : List[Any]=12 , _A : Any=16 , _A : Optional[Any]=4_096 , _A : Tuple="gelu" , _A : Dict=512 , _A : str=0.1 , _A : Tuple=0.0 , _A : Union[str, Any]=0.0 , _A : Union[str, Any]=2 , _A : Tuple=0.0_2 , _A : Dict=0.0 , _A : Tuple=True , _A : Any=False , _A : int=True , _A : Optional[Any]=True , _A : Optional[Any]=1 , _A : str=0 , _A : List[Any]=2 , **_A : Any , ):
'''simple docstring'''
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Any = d_model
UpperCAmelCase__ : Optional[int] = decoder_layers
UpperCAmelCase__ : Dict = decoder_attention_heads
UpperCAmelCase__ : str = decoder_ffn_dim
UpperCAmelCase__ : Tuple = activation_function
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : int = dropout
UpperCAmelCase__ : List[str] = attention_dropout
UpperCAmelCase__ : int = activation_dropout
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : Optional[int] = decoder_layerdrop
UpperCAmelCase__ : Tuple = use_cache
UpperCAmelCase__ : int = scale_embedding
UpperCAmelCase__ : Dict = use_learned_position_embeddings
UpperCAmelCase__ : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
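# A minimal usage sketch (not from the file above): build the decoder config
# with defaults and read attributes through the attribute_map defined above.
from transformers import TrOCRConfig

config = TrOCRConfig()
print(config.model_type)           # "trocr"
print(config.hidden_size)          # resolves to d_model via attribute_map
print(config.num_attention_heads)  # resolves to decoder_attention_heads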
| 299
|
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCamelCase__ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BlenderbotSmallTokenizer
def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
UpperCAmelCase__ : List[Any] = add_prefix_space
def lowercase_ ( self : str , _A : Any , _A : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
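# A small illustration of what build_inputs_with_special_tokens above produces:
# a single sequence becomes [bos] + ids + [eos]; a pair gains an extra [eos]
# separator. The token ids below are placeholders, not real vocabulary entries.
def build_inputs(bos_id, eos_id, ids_a, ids_b=None):
    output = [bos_id] + ids_a + [eos_id]
    if ids_b is None:
        return output
    return output + [eos_id] + ids_b + [eos_id]

assert build_inputs(0, 2, [7, 8]) == [0, 7, 8, 2]
assert build_inputs(0, 2, [7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]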
| 299
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'new-model'
if is_tf_available():
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewModelConfig
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''bert-base-cased'''
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = '''bert-base-cased'''
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
@require_tensorflow_probability
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Any = copy.deepcopy(model.config )
UpperCAmelCase__ : Tuple = ['''FunnelBaseModel''']
UpperCAmelCase__ : int = TFAutoModel.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register('''new-model''' , _A )
UpperCAmelCase__ : List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
auto_class.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config()
UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() )
UpperCAmelCase__ : str = auto_class.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
UpperCAmelCase__ : str = auto_class.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowercase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ):
UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = XLMRobertaTokenizer
lowerCAmelCase__ = XLMRobertaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = '''<pad>'''
UpperCAmelCase__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 1_002 )
def lowercase_ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
    def test_full_tokenizer( self : List[str] ):
        '''Round-trip tokens and ids, including the fairseq id offset and <unk> handling.'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained( self : str ):
        '''Saving and reloading must round-trip for both slow and fast tokenizers.'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self : Optional[Any] ):
        '''The full pretrained xlm-roberta-base tokenizer (downloads on first use).'''
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def test_picklable( self : Any ):
        '''A tokenizer built from a copied vocab file must survive a pickle round-trip.'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self : int ):
        '''The slow and fast tokenizers must agree on tokens and ids.'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self : str ):
        '''Encoding a simple string matches the ids produced by the original fairseq model.'''
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self : Any ):
        '''A long string with rare characters still matches the fairseq ids.'''
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self : Union[str, Any] ):
        '''Full integration check against a pinned revision of xlm-roberta-base.'''
        # fmt: off
UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 299
| 1
|
'''simple docstring'''
def solution( limit = 1_00_00_00 ) -> int:
    # Sieve Euler's totient for every n <= limit: phi[n] starts at n - 1 and,
    # for every prime i, each multiple j loses phi[j] // i.
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # phi[i] untouched so far, hence i is prime
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
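# Added sanity check (not part of the original snippet): Project Euler 72 lists
# 21 reduced proper fractions for denominators d <= 8, i.e. sum(totient(n)) for
# n = 2..8.
assert solution(8 ) == 21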
if __name__ == "__main__":
print(solution())
| 299
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    # The players alternate: children of a maximising node are minimising nodes.
    return (
        max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    )
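# Added worked example (not part of the original snippet): with scores [3, 5, 2, 9]
# and height 2, the maximiser gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0 , 0 , True , [3, 5, 2, 9] , 2 ) == 3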
def main( ) -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) , 2 )
    print(F"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 299
| 1
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main( ) -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
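# Added usage note (not part of the original snippet; the script file name below
# is hypothetical):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set nq.questions --gold_data_path nq.gold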
if __name__ == "__main__":
main()
| 299
|
'''simple docstring'''
class CircularQueue:
    '''A fixed-capacity FIFO queue backed by a circular array.'''
    def __init__( self , n : int ):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0  # index one past the last element
        self.size = 0
    def __len__( self ):
        return self.size
    def is_empty( self ):
        return self.size == 0
    def first( self ):
        '''Return the front element, or False when the queue is empty.'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data : int ):
        '''Append `data` at the rear; raises when the queue is already full.'''
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        '''Remove and return the front element; raises on an empty queue.'''
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
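# Added usage sketch (not part of the original snippet):
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)   # enqueue returns self, so calls chain
#   assert len(q) == 2 and q.first() == 1
#   assert q.dequeue() == 1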
| 299
| 1
|
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def get_letter_count( message: str ) -> dict[str, int]:
    # Count occurrences of each uppercase letter, ignoring everything else.
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
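# Added example (not part of the original snippet): counting is case-insensitive
# and skips non-letters, e.g. get_letter_count('''Hello!''')['''L'''] == 2.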
def get_item_at_index_zero( x: tuple ) -> str:
    # Sort key: the frequency component of a (frequency, letters) pair.
    return x[0]
def get_frequency_order( message: str ) -> str:
    # Order the alphabet from the most to the least frequent letter in `message`,
    # breaking ties by closeness to the standard English ETAOIN order.
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message: str ) -> int:
    # One point for each of English's six most / six least common letters that
    # also appears among the message's six most / six least common letters.
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
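# Added note (not part of the original snippet): the score therefore lies in
# [0, 12]; plausible English plaintext tends to score high, which makes this
# useful for ranking candidate decryptions.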
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
|
'''simple docstring'''
def selection_sort( collection: list ) -> list:
    # Repeatedly select the smallest remaining element and swap it into place.
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
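# Added example (not part of the original snippet): the sort is in-place and
# also returns the list.
assert selection_sort([5, 2, 4, 1] ) == [1, 2, 4, 5]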
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 299
| 1
|