| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
if number > 0:
raise ValueError("""input must be a negative integer""" )
lowerCAmelCase_ :int = len(bin(_lowercase )[3:] )
lowerCAmelCase_ :Union[str, Any] = bin(abs(_lowercase ) - (1 << binary_number_length) )[3:]
lowerCAmelCase_ :Any = (
(
"1"
+ "0" * (binary_number_length - len(_lowercase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
"""simple docstring"""
a : Any = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word (letters and spaces only) with Bacon's cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Bacon's-cipher message made up of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
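# Example (a sketch; uses encode/decode as defined above):
#   encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#   decode("AABBBAABAAABABAABABAABBAB") -> "hello"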
if __name__ == "__main__":
from doctest import testmod
testmod()
| 105
| 0
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-character tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, strip non-letters and pad repeated letters with 'X'."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table (stored as a flat list) from the key."""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt `plaintext` with the Playfair cipher using `key`."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt a Playfair `ciphertext` using `key`."""
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
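# Round-trip example (a sketch; uses the functions above): for any key, e.g. "secret",
# decode(encode("hello", "secret"), "secret") returns the prepared plaintext
# prepare_input("hello"), i.e. "HELXLO".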
| 356
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
a :List[Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
a :str = json.load(f)
@require_torch
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self , _a ) -> Optional[int]:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(_a )
def _a ( self , _a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(_a ).to(_a )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def _a ( self , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''facebook/wmt19-{pair}'''
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer(_a )
SCREAMING_SNAKE_CASE__ : Any = self.get_model(_a )
SCREAMING_SNAKE_CASE__ : Tuple = bleu_data[pair]["""src"""]
SCREAMING_SNAKE_CASE__ : Any = bleu_data[pair]["""tgt"""]
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a , return_tensors="""pt""" , truncation=_a , padding="""longest""" ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
SCREAMING_SNAKE_CASE__ : Dict = calculate_bleu(_a , _a )
print(_a )
self.assertGreaterEqual(scores["""bleu"""] , _a )
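# To run this slow integration test locally (a sketch; the exact test path inside a
# transformers checkout is an assumption):
#   RUN_SLOW=1 pytest tests/models/fsmt/test_fsmt_bleu_score.py -k en_ru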
| 56
| 0
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in parallel: 1 / (1/r1 + 1/r2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in series: r1 + r2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
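# Worked example (a sketch): resistor_parallel([2, 4, 4]) == 1 / (1/2 + 1/4 + 1/4) == 1.0 ohm,
# while resistor_series([2, 4, 4]) == 10.0 ohms.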
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242
|
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_A = """bart"""
_A = True
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
lowerCAmelCase__ : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
lowerCAmelCase__ : List[str] = qar_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
lowerCAmelCase__ : List[Any] = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
lowerCAmelCase__ : List[str] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
lowerCAmelCase__ : Dict = sas_model.eval()
else:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> Union[str, Any]:
if LOAD_DENSE_INDEX:
lowerCAmelCase__ : Union[str, Any] = faiss.StandardGpuResources()
lowerCAmelCase__ : List[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
lowerCAmelCase__ : Dict = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
lowerCAmelCase__ : str = faiss.IndexFlatIP(128 )
lowerCAmelCase__ : Optional[int] = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (None, None)
lowerCAmelCase__ : Optional[int] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def lowercase_ ( ) -> List[str]:
lowerCAmelCase__ : List[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
lowerCAmelCase__ : Tuple = elia["""train_eli5"""]
lowerCAmelCase__ : Dict = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
lowerCAmelCase__ : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
_A , _A , _A = load_indexes()
_A , _A , _A , _A = load_models()
_A , _A = load_train_data()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=10 ) -> Optional[Any]:
lowerCAmelCase__ : str = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = [elia_train[int(__UpperCAmelCase )] for i in I[0]]
return nn_examples
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="wiki40b" , __UpperCAmelCase="dense" , __UpperCAmelCase=10 ) -> List[str]:
if source == "none":
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=__UpperCAmelCase , )
lowerCAmelCase__ : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
lowerCAmelCase__ : Optional[Any] = """question: {} context: {}""".format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None),
} )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=64 , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0.8 ) -> Optional[int]:
with torch.no_grad():
lowerCAmelCase__ : List[Any] = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_A = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_A = st.sidebar.checkbox("""Demo options""")
if demo_options:
_A = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_A = action_list.index(action_st)
_A = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_A = show_type == """Show full text of passages"""
else:
_A = 3
_A = True
_A = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_A = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
"""
st.sidebar.markdown(retriever_info)
_A = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_A = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_A = """wiki40b"""
_A = """dense"""
_A = """beam"""
_A = 2
_A = 6_4
_A = 2_5_6
_A = None
_A = None
_A = st.sidebar.checkbox("""Generation options""")
if generate_options:
_A = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_A = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_A = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
_A = st.sidebar.slider(
"""Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
_A = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_A = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_A = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_A = None
# start main text
_A = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_A = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_A = st.text_input("""Enter your question here:""", """""")
else:
_A = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_A , _A = make_support(question, source=wiki_source, method="""dense""", n_results=1_0)
_A , _A = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0)
_A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_A = support_list[:1_0]
_A = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_A , _A = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
_A , _A = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_A = res[1].strip()
if sec_titles == "":
_A = """[{}]({})""".format(res[0], wiki_url)
else:
_A = sec_titles.split(""" & """)
_A = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_A = find_nearest_training(question)
_A = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_A = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
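# To launch the demo locally (a sketch; the script filename is an assumption, and the
# models, index files and Elasticsearch instance referenced above must be available):
#   streamlit run eli5_app.py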
| 242
| 1
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list for `item`; return True if it is present."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
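# Example (a sketch): binary_search([1, 3, 5, 7, 9], 7) -> True and
# binary_search([1, 3, 5, 7, 9], 4) -> False; the input list must already be sorted.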
| 193
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE_ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : Optional[str] = "relu" ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(
lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=lowerCamelCase__ ,stride=lowerCamelCase__ ,padding=kernel_size // 2 ,groups=lowerCamelCase__ ,bias=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = nn.BatchNormad(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,lowerCamelCase__ : RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
SCREAMING_SNAKE_CASE = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,stride=lowerCamelCase__ ,bias=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.BatchNormad(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> int:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) )
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.Sigmoid() ,)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.attention(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_state * attention
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetSELayer(lowerCamelCase__ ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 2 ,) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,) ,*[layer(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for _ in range(depth - 1 )] ,)
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.layers(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : RegNetConfig ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
SCREAMING_SNAKE_CASE = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase__ ,config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,depth=lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE = stage_module(lowerCamelCase__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ ,hidden_states=lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = RegNetConfig
__snake_case : Union[str, Any] = "regnet"
__snake_case : Optional[Any] = "pixel_values"
__snake_case : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" )
elif isinstance(lowerCamelCase__ ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str=False ) -> str:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = RegNetEmbeddings(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = RegNetEncoder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.encoder(
lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ ,pooler_output=lowerCamelCase__ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any ,lowerCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = RegNetModel(lowerCamelCase__ )
# classification head
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[torch.LongTensor] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE = self.classifier(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase__ ,logits=lowerCamelCase__ ,hidden_states=outputs.hidden_states )
| 193
| 1
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to reach `target` as a sum of elements of `array` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized with a dp_array so each subproblem is solved once."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up dynamic-programming variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
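# For n = 3, array = [1, 2, 5] and target = 5, all three functions above return 9:
# the number of ordered ways to write 5 as a sum drawn from {1, 2, 5}.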
| 235
|
| 235
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A ( a_ ,a_ ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__UpperCamelCase : Dict =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase : Dict =torch.permute(a_ ,(0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a_ ):
# linear layer
__UpperCamelCase : str =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase : Union[str, Any] =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__UpperCamelCase : Any =flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def A ( a_ ,a_ ,a_ ) -> Any:
if "metadata" in layer:
__UpperCamelCase : Any =layer.split('metadata' )
__UpperCamelCase : Optional[Any] =''.join(split_layer[0] )[:-1]
__UpperCamelCase : Any =[tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
__UpperCamelCase : Optional[int] =layer.split('kvstore' )
__UpperCamelCase : Any =''.join(split_layer[0] )[:-1]
__UpperCamelCase : Union[str, Any] =[tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
__UpperCamelCase : int =layer.split('/' )
__UpperCamelCase : Any ='/'.join(split_layer[:-1] )
__UpperCamelCase : List[str] =(split_layer[-1],)
if "kvstore/path" in layer:
__UpperCamelCase : List[Any] =F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
__UpperCamelCase : Optional[int] ='file'
else:
__UpperCamelCase : int =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A ( a_ ,a_ ) -> Optional[Any]:
__UpperCamelCase : Optional[int] =rename_keys(a_ )
__UpperCamelCase : List[str] ={}
for k, v in current_block.items():
__UpperCamelCase : Any =v
__UpperCamelCase : Any =new_current_block
torch.save(a_ ,a_ )
def A ( a_ ,a_ ,a_ ,a_ ,a_ = WEIGHTS_NAME ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] =convert_file_size_to_int(a_ )
__UpperCamelCase : Any =[]
__UpperCamelCase : Tuple ={}
__UpperCamelCase : str =0
__UpperCamelCase : int =0
os.makedirs(a_ ,exist_ok=a_ )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' ,'rb' ) as fp:
__UpperCamelCase : Tuple =serialization.msgpack_restore(fp.read() )['optimizer']['target']
__UpperCamelCase : List[Any] =flatten_dict(a_ ,sep='/' )
__UpperCamelCase : str ={}
for layer in checkpoint_info.keys():
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] =get_key_and_tensorstore_dict(
a_ ,a_ ,a_ )
if curr_real_layer_name in all_layers:
__UpperCamelCase : Optional[Any] =content
else:
__UpperCamelCase : List[Any] ={split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__UpperCamelCase : Any =ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__UpperCamelCase : Any =torch.tensor(a_ )
__UpperCamelCase : Optional[int] =raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__UpperCamelCase , __UpperCamelCase : List[str] =rename_base_flax_keys(tuple(key.split('/' ) ) ,a_ )
__UpperCamelCase : int ='/'.join(a_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__UpperCamelCase : Any =os.path.join(
a_ ,weights_name.replace('.bin' ,F'-{len(a_ )+1:05d}-of-???.bin' ) )
rename_and_save_block(a_ ,a_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__UpperCamelCase : str ={}
__UpperCamelCase : str =0
__UpperCamelCase : int =raw_weights.to(getattr(a_ ,a_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__UpperCamelCase : Any =os.path.join(a_ ,weights_name.replace('.bin' ,F'-{len(a_ )+1:05d}-of-???.bin' ) )
rename_and_save_block(a_ ,a_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__UpperCamelCase : Union[str, Any] ={}
__UpperCamelCase : Any ={}
for idx, shard in enumerate(a_ ):
__UpperCamelCase : Optional[Any] =weights_name.replace(
'.bin' ,F'-{idx+1:05d}-of-{len(a_ ):05d}.bin' ) # len(sharded_state_dicts):05d}
__UpperCamelCase : List[Any] =os.path.join(a_ ,weights_name.replace('.bin' ,F'-{idx+1:05d}-of-???.bin' ) )
os.rename(a_ ,os.path.join(a_ ,a_ ) )
__UpperCamelCase : Any =shard
for key in shard:
__UpperCamelCase : Union[str, Any] =shard_file
# Add the metadata
__UpperCamelCase : int ={'total_size': total_size}
__UpperCamelCase : Any ={'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(a_ ,a_ ) ,'w' ,encoding='utf-8' ) as f:
__UpperCamelCase : List[str] =json.dumps(a_ ,indent=2 ,sort_keys=a_ ) + '\n'
f.write(a_ )
return metadata, index
if __name__ == "__main__":
A_ :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
A_ :Optional[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A ( ) -> str:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
__UpperCamelCase : Optional[int] =SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
__UpperCamelCase : int =SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' ,device_map='auto' )
__UpperCamelCase : List[str] =T5Tokenizer.from_pretrained('t5-small' )
__UpperCamelCase : Any ='A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
__UpperCamelCase : Dict =tokenizer(a_ ,return_tensors='pt' ).input_ids
__UpperCamelCase : Any =model.generate(a_ ,decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 245
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A_ :Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =["""pixel_values"""]
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase : Dict =get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__UpperCamelCase : Optional[int] =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase : List[str] =get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name='crop_size' )
__UpperCamelCase : Optional[Any] =do_resize
__UpperCamelCase : Optional[int] =size
__UpperCamelCase : List[Any] =resample
__UpperCamelCase : Optional[int] =do_center_crop
__UpperCamelCase : Optional[int] =crop_size
__UpperCamelCase : str =do_rescale
__UpperCamelCase : Any =rescale_factor
__UpperCamelCase : Union[str, Any] =do_normalize
__UpperCamelCase : Union[str, Any] =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase : List[Any] =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase : Any =do_convert_rgb
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase : Tuple =get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : List[str] =get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : int =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase : Dict =size if size is not None else self.size
__UpperCamelCase : List[Any] =get_size_dict(lowerCamelCase__ , param_name='size' , default_to_square=lowerCamelCase__ )
__UpperCamelCase : Tuple =resample if resample is not None else self.resample
__UpperCamelCase : Any =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase : Tuple =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase : Any =get_size_dict(lowerCamelCase__ , param_name='crop_size' , default_to_square=lowerCamelCase__ )
__UpperCamelCase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase : Optional[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase : Dict =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase : Optional[Any] =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase : List[str] =image_std if image_std is not None else self.image_std
__UpperCamelCase : Union[str, Any] =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase : int =make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase : Union[str, Any] =[convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase : List[str] =[to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
__UpperCamelCase : str =[self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
__UpperCamelCase : Union[str, Any] =[self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
__UpperCamelCase : Optional[Any] =[self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
__UpperCamelCase : Optional[int] =[self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
__UpperCamelCase : List[Any] =[to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
__UpperCamelCase : List[Any] ={'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
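# A minimal usage sketch (an assumption based on the standard transformers image-processor
# API; the checkpoint name is illustrative):
#   from transformers import CLIPImageProcessor
#   processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=pil_image, return_tensors="pt")   # pil_image: a PIL.Image
#   print(batch["pixel_values"].shape)                          # typically (1, 3, 224, 224)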
| 245
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Any = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': 5_12,
'''google/realm-cc-news-pretrained-encoder''': 5_12,
'''google/realm-cc-news-pretrained-scorer''': 5_12,
'''google/realm-cc-news-pretrained-openqa''': 5_12,
'''google/realm-orqa-nq-openqa''': 5_12,
'''google/realm-orqa-nq-reader''': 5_12,
'''google/realm-orqa-wq-openqa''': 5_12,
'''google/realm-orqa-wq-reader''': 5_12,
}
lowerCAmelCase__ : List[str] = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = RealmTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase="[UNK]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[PAD]" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , __UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __UpperCamelCase ) != tokenize_chinese_chars
):
snake_case__ : Optional[Any] = getattr(__UpperCamelCase , normalizer_state.pop('type' ) )
snake_case__ : Union[str, Any] = do_lower_case
snake_case__ : Union[str, Any] = strip_accents
snake_case__ : Dict = tokenize_chinese_chars
snake_case__ : List[str] = normalizer_class(**__UpperCamelCase )
snake_case__ : str = do_lower_case
def __a ( self , __UpperCamelCase , **__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
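        # Pads every candidate batch to `max_length` so the per-example encodings can be
        # stacked into tensors of shape (batch_size, num_candidates, max_length).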
snake_case__ : Any = PaddingStrategy.MAX_LENGTH
snake_case__ : int = text
snake_case__ : List[str] = kwargs.pop('text_pair' , __UpperCamelCase )
snake_case__ : Tuple = kwargs.pop('return_tensors' , __UpperCamelCase )
snake_case__ : Dict = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(__UpperCamelCase ):
if batch_text_pair is not None:
snake_case__ : str = batch_text_pair[idx]
else:
snake_case__ : Union[str, Any] = None
snake_case__ : Optional[Any] = super().__call__(__UpperCamelCase , __UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
snake_case__ : List[Any] = encoded_candidates.get('input_ids' )
snake_case__ : Any = encoded_candidates.get('attention_mask' )
snake_case__ : Optional[Any] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__UpperCamelCase )
snake_case__ : List[str] = {key: item for key, item in output_data.items() if len(__UpperCamelCase ) != 0}
return BatchEncoding(__UpperCamelCase , tensor_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None ) -> str:
'''simple docstring'''
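        # Single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP].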
snake_case__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
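        # Token type ids: 0 for the first segment (including [CLS] and its [SEP]), 1 for the second segment.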
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case__ : Optional[int] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
| 143
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 4
snake_case__ : List[Any] = 3
snake_case__ : int = (32, 32)
snake_case__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : str = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 4
snake_case__ : List[Any] = 4
snake_case__ : List[str] = (32, 32)
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> int:
'''simple docstring'''
return (4, 32, 32)
@property
def __a ( self ) -> str:
'''simple docstring'''
return (4, 32, 32)
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
snake_case__ : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
snake_case__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : Union[str, Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
snake_case__ : str = model_accelerate(__UpperCamelCase , __UpperCamelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case__ , snake_case__ : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
snake_case__ : List[str] = model_normal_load(__UpperCamelCase , __UpperCamelCase )['sample']
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCamelCase )
snake_case__ : Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : List[Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : List[str] = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case__ : int = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self , __UpperCamelCase=(32, 32) ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = 4
snake_case__ : Dict = 3
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> int:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
snake_case__ : str = self.dummy_input
return init_dict, inputs_dict
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ , snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : Dict = self.dummy_input
snake_case__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase )
snake_case__ : List[Any] = noise
snake_case__ : Any = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCamelCase )
snake_case__ : Optional[Any] = 4
snake_case__ : str = 3
snake_case__ : List[Any] = (256, 256)
snake_case__ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCamelCase )
snake_case__ : Dict = 4
snake_case__ : List[str] = 3
snake_case__ : Union[str, Any] = (32, 32)
snake_case__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : Tuple = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
| 143
| 1
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( A__, A__, A__ ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ : int = LxmertConfig.from_json_file(A__ )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE_ : Any = LxmertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A__, A__, A__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), A__ )
if __name__ == "__main__":
lowerCAmelCase__ : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ : Dict =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
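# Example invocation (all paths below are placeholders):
#   python <this_script>.py --tf_checkpoint_path ./model.ckpt --config_file ./lxmert_config.json --pytorch_dump_path ./pytorch_model.bin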
| 361
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def a__ ( A__ ):
if is_torch_version('<', '2.0.0' ) or not hasattr(A__, '_dynamo' ):
return False
return isinstance(A__, torch._dynamo.eval_frame.OptimizedModule )
def a__ ( A__, A__ = True ):
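    # Recursively unwrap DistributedDataParallel / DataParallel (and DeepSpeedEngine, if
    # available) containers; optionally restore the original forward and undo any
    # transformer-engine conversion, then re-attach the compiled wrapper if the model was compiled.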
SCREAMING_SNAKE_CASE_ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ : List[str] = is_compiled_module(A__ )
if is_compiled:
SCREAMING_SNAKE_CASE_ : List[Any] = model
SCREAMING_SNAKE_CASE_ : Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__, A__ ):
SCREAMING_SNAKE_CASE_ : int = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ : str = getattr(A__, 'forward' )
SCREAMING_SNAKE_CASE_ : Any = model.__dict__.pop('_original_forward', A__ )
if original_forward is not None:
while hasattr(A__, '__wrapped__' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ : Any = forward
if getattr(A__, '_converted_to_transformer_engine', A__ ):
convert_model(A__, to_transformer_engine=A__ )
if is_compiled:
SCREAMING_SNAKE_CASE_ : List[str] = model
SCREAMING_SNAKE_CASE_ : Dict = compiled_model
return model
def a__ ( ):
PartialState().wait_for_everyone()
def a__ ( A__, A__ ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__, A__ )
elif PartialState().local_process_index == 0:
torch.save(A__, A__ )
@contextmanager
def a__ ( **A__ ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ : List[Any] = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def a__ ( A__ ):
if not hasattr(A__, '__qualname__' ) and not hasattr(A__, '__name__' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(A__, '__class__', A__ )
if hasattr(A__, '__qualname__' ):
return obj.__qualname__
if hasattr(A__, '__name__' ):
return obj.__name__
return str(A__ )
def a__ ( A__, A__ ):
for key, value in source.items():
if isinstance(A__, A__ ):
SCREAMING_SNAKE_CASE_ : Dict = destination.setdefault(A__, {} )
merge_dicts(A__, A__ )
else:
SCREAMING_SNAKE_CASE_ : Tuple = value
return destination
def a__ ( A__ = None ):
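    # Returns True if something is already listening on the given localhost port
    # (defaults to 29500, the usual torch.distributed rendezvous port).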
if port is None:
SCREAMING_SNAKE_CASE_ : Tuple = 2_9_5_0_0
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 162
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase :
_SCREAMING_SNAKE_CASE = LEDConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = 'gelu'
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=4 , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
lowerCAmelCase = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
lowerCAmelCase = global_attention_mask
return config, inputs_dict
def _snake_case ( self , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = TFLEDModel(config=lowercase ).get_decoder()
lowerCAmelCase = inputs_dict["""input_ids"""]
lowerCAmelCase = input_ids[:1, :]
lowerCAmelCase = inputs_dict["""attention_mask"""][:1, :]
lowerCAmelCase = 1
# first forward pass
lowerCAmelCase = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase = model(lowercase , attention_mask=lowercase )[0]
lowerCAmelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , ):
'''simple docstring'''
if attention_mask is None:
lowerCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = TFLEDModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase )
def _snake_case ( self ) -> Any:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = tf.zeros_like(inputs_dict["""attention_mask"""] )
lowerCAmelCase = 2
lowerCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
lowerCAmelCase = True
lowerCAmelCase = self.model_tester.seq_length
lowerCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase ):
lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase ):
lowerCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = model_class(lowercase )
lowerCAmelCase = model(self._prepare_for_class(lowercase , lowercase ) )
lowerCAmelCase = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
lowerCAmelCase = model_class(lowercase )
lowerCAmelCase = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase )
lowerCAmelCase = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(lowercase )
lowerCAmelCase = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self ) -> List[str]:
        # TODO: Head-masking not yet implemented
pass
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return tf.constant(SCREAMING_SNAKE_CASE , dtype=tf.intaa )
SCREAMING_SNAKE_CASE__ = 1e-4
@slow
@require_tf
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
lowerCAmelCase = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
lowerCAmelCase = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
lowerCAmelCase = prepare_led_inputs_dict(model.config , lowercase , lowercase )
lowerCAmelCase = model(**lowercase )[0]
lowerCAmelCase = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
lowerCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-3 )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
lowerCAmelCase = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
lowerCAmelCase = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
lowerCAmelCase = prepare_led_inputs_dict(model.config , lowercase , lowercase )
lowerCAmelCase = model(**lowercase )[0]
lowerCAmelCase = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
lowerCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-3 , rtol=1e-3 )
| 46
|
'''simple docstring'''
import torch
from torch import nn
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False):
super().__init__()
UpperCAmelCase__ : List[Any] = n_token
UpperCAmelCase__ : Tuple = d_embed
UpperCAmelCase__ : str = d_proj
UpperCAmelCase__ : str = cutoffs + [n_token]
UpperCAmelCase__ : List[Any] = [0] + self.cutoffs
UpperCAmelCase__ : Optional[Any] = div_val
UpperCAmelCase__ : Optional[int] = self.cutoffs[0]
UpperCAmelCase__ : Optional[int] = len(self.cutoffs) - 1
UpperCAmelCase__ : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
UpperCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters))
UpperCAmelCase__ : int = nn.ModuleList()
UpperCAmelCase__ : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase)))
else:
self.out_projs.append(_lowerCamelCase)
self.out_layers.append(nn.Linear(_lowerCamelCase , _lowerCamelCase))
else:
for i in range(len(self.cutoffs)):
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase)))
self.out_layers.append(nn.Linear(_lowerCamelCase , r_idx - l_idx))
UpperCAmelCase__ : Optional[int] = keep_order
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
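        # Compute logits: project the hidden states through `proj` (when given), then apply
        # the linear output layer, i.e. logit = projected_hidden @ weight.T + bias.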
if proj is None:
UpperCAmelCase__ : Dict = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase__ : Optional[int] = nn.functional.linear(_lowerCamelCase , proj.t().contiguous())
UpperCAmelCase__ : List[str] = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False):
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase__ : Optional[int] = hidden[..., :-1, :].contiguous()
UpperCAmelCase__ : int = labels[..., 1:].contiguous()
UpperCAmelCase__ : List[str] = hidden.view(-1 , hidden.size(-1))
UpperCAmelCase__ : Optional[int] = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
UpperCAmelCase__ : Optional[int] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
UpperCAmelCase__ : Tuple = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
UpperCAmelCase__ : Dict = labels != -100
UpperCAmelCase__ : Tuple = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device)
UpperCAmelCase__ : List[Any] = (
-nn.functional.log_softmax(_lowerCamelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=-1)
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Dict = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : Union[str, Any] = self.out_layers[i].weight
UpperCAmelCase__ : Any = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : Optional[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
UpperCAmelCase__ : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_lowerCamelCase)
biases.append(_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : Optional[int] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(_lowerCamelCase , dim=1)
if labels is None:
UpperCAmelCase__ : str = hidden.new_empty((head_logit.size(0), self.n_token))
else:
UpperCAmelCase__ : Optional[Any] = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device)
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = [0] + self.cutoffs
for i in range(len(_lowerCamelCase) - 1):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase__ : List[str] = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase__ : str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase__ : List[Any] = labels.index_select(0 , _lowerCamelCase) - l_idx
UpperCAmelCase__ : List[str] = head_logprob.index_select(0 , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = hidden.index_select(0 , _lowerCamelCase)
else:
UpperCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
UpperCAmelCase__ : List[Any] = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
UpperCAmelCase__ : Tuple = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : int = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : str = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
UpperCAmelCase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase__ : Tuple = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , _lowerCamelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def snake_case__ ( self , _lowerCamelCase):
if self.n_clusters == 0:
UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(_lowerCamelCase , dim=-1)
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : int = self.out_layers[i].weight
UpperCAmelCase__ : List[str] = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
UpperCAmelCase__ : Optional[int] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_lowerCamelCase)
biases.append(_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : List[Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
UpperCAmelCase__ : int = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : str = [0] + self.cutoffs
for i in range(len(_lowerCamelCase) - 1):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase__ : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase__ : Dict = logprob_i
return out
| 163
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase_ ( a_ ):
@staticmethod
@abstractmethod
def UpperCamelCase_ ( snake_case__ ) -> List[str]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError()
| 359
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase_ ( a_ ):
_A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A : ClassVar[Features] = Features({'image': Image()} )
_A : ClassVar[Features] = Features({'labels': ClassLabel} )
_A : str = "image"
_A : str = "labels"
def UpperCamelCase_ ( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 248
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A : List[Any] = None
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = '▁'
A : Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A : List[Any] = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A : Optional[Any] = {
'google/pegasus-xsum': 5_12,
}
class lowerCamelCase (UpperCamelCase__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PegasusTokenizer
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : str , __magic_name__ : List[Any]=None , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]="<pad>" , __magic_name__ : Optional[Any]="</s>" , __magic_name__ : str="<unk>" , __magic_name__ : List[Any]="<mask_2>" , __magic_name__ : str="<mask_1>" , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=103 , **__magic_name__ : Dict , ) -> Dict:
SCREAMING_SNAKE_CASE_ = offset
if additional_special_tokens is not None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(__lowerCamelCase )}, but is'''
F''' {type(__lowerCamelCase )}''' )
SCREAMING_SNAKE_CASE_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(__lowerCamelCase ) , self.offset - 1 )
]
if len(set(__lowerCamelCase ) ) != len(__lowerCamelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
SCREAMING_SNAKE_CASE_ = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
__lowerCamelCase , tokenizer_file=__lowerCamelCase , pad_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , mask_token=__lowerCamelCase , mask_token_sent=__lowerCamelCase , offset=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def __A ( self : Optional[Any] , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def __A ( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] = None , __magic_name__ : Dict = False ) -> Any:
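        # Returns a mask with 1 for special tokens and 0 for sequence tokens; the trailing [1]
        # accounts for the EOS token appended when special tokens are added.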
if already_has_special_tokens:
return self._special_token_mask(__lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(__lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __A ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int]=None ) -> Tuple:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __A ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] = None ) -> str:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
| 118
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase__( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple=0.999 , UpperCamelCase__ : Any="cosine" , )->List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCamelCase__ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCamelCase__ : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
A__ = []
for i in range(UpperCamelCase__ ):
A__ = i / num_diffusion_timesteps
A__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) )
return torch.tensor(UpperCamelCase__ , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers]
__SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self,__lowerCamelCase = 1000,__lowerCamelCase = 0.00085,__lowerCamelCase = 0.012,__lowerCamelCase = "linear",__lowerCamelCase = None,__lowerCamelCase = "epsilon",__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = 1.0,__lowerCamelCase = "linspace",__lowerCamelCase = 0,):
if trained_betas is not None:
A__ = torch.tensor(__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "linear":
A__ = torch.linspace(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5,beta_end**0.5,__lowerCamelCase,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas,dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
A__ = use_karras_sigmas
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
if schedule_timesteps is None:
A__ = self.timesteps
A__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A__ = 1 if len(__lowerCamelCase ) > 1 else 0
else:
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
A__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,):
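        # Scale the input by 1 / sqrt(sigma**2 + 1) so the model receives a roughly
        # unit-variance sample at every noise level.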
A__ = self.index_for_timestep(__lowerCamelCase )
A__ = self.sigmas[step_index]
A__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None,):
A__ = num_inference_steps
A__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A__ = np.linspace(0,num_train_timesteps - 1,__lowerCamelCase,dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(0,__lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(__lowerCamelCase,0,-step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
A__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A__ = np.log(__lowerCamelCase )
A__ = np.interp(__lowerCamelCase,np.arange(0,len(__lowerCamelCase ) ),__lowerCamelCase )
if self.config.use_karras_sigmas:
A__ = self._convert_to_karras(in_sigmas=__lowerCamelCase,num_inference_steps=self.num_inference_steps )
A__ = np.array([self._sigma_to_t(__lowerCamelCase,__lowerCamelCase ) for sigma in sigmas] )
A__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A__ = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
A__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A__ = torch.from_numpy(__lowerCamelCase )
A__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith('''mps''' ):
# mps does not support float64
A__ = timesteps.to(__lowerCamelCase,dtype=torch.floataa )
else:
A__ = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
A__ = None
A__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A__ = defaultdict(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
# get log sigma
A__ = np.log(__lowerCamelCase )
# get distribution
A__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A__ = np.cumsum((dists >= 0),axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A__ = low_idx + 1
A__ = log_sigmas[low_idx]
A__ = log_sigmas[high_idx]
# interpolate sigmas
A__ = (low - log_sigma) / (low - high)
A__ = np.clip(__lowerCamelCase,0,1 )
# transform interpolation to time range
A__ = (1 - w) * low_idx + w * high_idx
A__ = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
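        # Karras et al. (2022) sigma schedule: interpolate between sigma_max and sigma_min
        # in 1/rho space with rho = 7.0, the value used in the paper.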
A__ = in_sigmas[-1].item()
A__ = in_sigmas[0].item()
A__ = 7.0 # 7.0 is the value used in the paper
A__ = np.linspace(0,1,__lowerCamelCase )
A__ = sigma_min ** (1 / rho)
A__ = sigma_max ** (1 / rho)
A__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self ):
return self.dt is None
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = True,):
A__ = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A__ = self.sigmas[step_index]
A__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A__ = self.sigmas[step_index - 1]
A__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A__ = 0
A__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A__ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
A__ = pred_original_sample.clamp(
-self.config.clip_sample_range,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A__ = sigma_next - sigma_hat
# store for 2nd order step
A__ = derivative
A__ = dt
A__ = sample
else:
# 2. 2nd order / Heun's method
A__ = (sample - pred_original_sample) / sigma_next
A__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A__ = self.dt
A__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A__ = None
A__ = None
A__ = None
A__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A__ = self.sigmas.to(device=original_samples.device,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
A__ = self.timesteps.to(original_samples.device,dtype=torch.floataa )
A__ = timesteps.to(original_samples.device,dtype=torch.floataa )
else:
A__ = self.timesteps.to(original_samples.device )
A__ = timesteps.to(original_samples.device )
A__ = [self.index_for_timestep(__lowerCamelCase,__lowerCamelCase ) for t in timesteps]
A__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A__ = sigma.unsqueeze(-1 )
A__ = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 193
| 0
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int=13 , lowerCamelCase : int=30 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Tuple=3 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Dict=True , lowerCamelCase : Optional[int]=32 , lowerCamelCase : List[Any]=5 , lowerCamelCase : Tuple=4 , lowerCamelCase : Union[str, Any]=37 , lowerCamelCase : int="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : List[Any]=10 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : Dict=0.6 , lowerCamelCase : Dict=None , ) -> Any:
__snake_case : str = parent
__snake_case : Tuple = batch_size
__snake_case : Optional[int] = image_size
__snake_case : Dict = patch_size
__snake_case : List[Any] = num_channels
__snake_case : Tuple = is_training
__snake_case : Dict = use_labels
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = type_sequence_label_size
__snake_case : List[str] = initializer_range
__snake_case : Optional[int] = mask_ratio
__snake_case : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__snake_case : List[str] = (image_size // patch_size) ** 2
__snake_case : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
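        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = 15 ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91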
def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
__snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : int ) -> Union[str, Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __snake_case ( self : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] ) -> List[str]:
__snake_case : List[Any] = ViTMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : int ) -> Optional[Any]:
__snake_case : Dict = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase )
__snake_case : List[Any] = (self.image_size // self.patch_size) ** 2
__snake_case : str = self.patch_size**2 * self.num_channels
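        # the pre-training head reconstructs one flattened patch per token, i.e.
        # patch_size * patch_size pixel values for each image channel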
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__snake_case : List[Any] = 1
__snake_case : int = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Optional[int] = model(lowerCamelCase )
__snake_case : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __snake_case ( self : Dict ) -> Tuple:
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs
__snake_case : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Any = False
def __snake_case ( self : List[str] ) -> Dict:
__snake_case : Union[str, Any] = ViTMAEModelTester(self )
__snake_case : List[Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def __snake_case ( self : List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __snake_case ( self : str ) -> Tuple:
pass
def __snake_case ( self : Any ) -> List[str]:
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __snake_case ( self : List[str] ) -> Optional[int]:
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(lowerCamelCase )
__snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] = [*signature.parameters.keys()]
__snake_case : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : int ) -> Optional[Any]:
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Any ) -> Any:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def __snake_case ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any ) -> str:
# make masks reproducible
np.random.seed(2 )
__snake_case : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__snake_case : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__snake_case : Any = torch.from_numpy(lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__snake_case : Optional[int] = pt_noise
super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Optional[Any]:
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : List[Any] = outputs[0].cpu().numpy()
__snake_case : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
__snake_case : List[Any] = model_class.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
# Make sure we don't have nans
__snake_case : Any = after_outputs[0].cpu().numpy()
__snake_case : Tuple = 0
__snake_case : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __snake_case ( self : Dict ) -> List[Any]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __snake_case ( self : int ) -> List[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __snake_case ( self : Dict ) -> Dict:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : List[str] ) -> Any:
pass
@slow
def __snake_case ( self : int ) -> str:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = ViTMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase_ ( ):
__snake_case : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : List[Any] ) -> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __snake_case ( self : Tuple ) -> Optional[Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__snake_case : Optional[int] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowerCamelCase )
__snake_case : List[Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : List[str] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__snake_case : Tuple = ViTMAEConfig()
__snake_case : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__snake_case : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__snake_case : int = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) )
# verify the logits
__snake_case : Tuple = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1E-4 ) )
| 134
|
import numpy
class a :
"""simple docstring"""
def __init__( self : str , lowerCamelCase : numpy.ndarray , lowerCamelCase : numpy.ndarray ) -> None:
__snake_case : Any = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
__snake_case : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__snake_case : Optional[int] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__snake_case : int = numpy.random.rand(3 , 1 )
# Real output values provided.
__snake_case : Optional[Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__snake_case : Optional[int] = numpy.zeros(output_array.shape )
def __snake_case ( self : List[Any] ) -> numpy.ndarray:
__snake_case : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __snake_case ( self : Union[str, Any] ) -> None:
__snake_case : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__snake_case : Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__snake_case : Optional[Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __snake_case ( self : List[str] , lowerCamelCase : numpy.ndarray , lowerCamelCase : int , lowerCamelCase : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
__snake_case : Any = self.feedforward()
self.back_propagation()
if give_loss:
__snake_case : str = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'Iteration {iteration} Loss: {loss}' )
def __snake_case ( self : Optional[Any] , lowerCamelCase : numpy.ndarray ) -> int:
__snake_case : Any = input_arr
__snake_case : List[str] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__snake_case : List[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__snake_case : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase_ ( __lowerCamelCase ):
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase_ ( __lowerCamelCase ):
return (value) * (1 - (value))
def lowerCAmelCase_ ( ):
__snake_case : Dict = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__snake_case : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__snake_case : int = TwoHiddenLayerNeuralNetwork(
input_array=__lowerCamelCase , output_array=__lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__lowerCamelCase , iterations=1_0 , give_loss=__lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 134
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36
|
def lowerCAmelCase__( lowercase : int = 100_0000 ) -> int:
__snake_case : List[Any] = limit + 1
__snake_case : List[str] = [0] * limit
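    # With x = a + d, y = a, z = a - d in arithmetic progression, the quantity
    # x**2 - y**2 - z**2 simplifies to a * (4d - a) = n, hence d = (first_term + n / first_term) / 4.
    # `first_term` plays the role of a, and n runs over its multiples below.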
for first_term in range(1 , lowercase ):
for n in range(lowercase , lowercase , lowercase ):
__snake_case : Union[str, Any] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 (a > d), and a < 4d so that n > 0
__snake_case : Tuple = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326
| 0
|
'''simple docstring'''
import math
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
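    # so it is enough to test divisors of the form i = 6k - 1 and i + 2 = 6k + 1
    # up to sqrt(number)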
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 10_001 ):
try:
__a : Any = int(_SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
__a : list[int] = []
__a : List[Any] = 2
while len(_SCREAMING_SNAKE_CASE ) < nth:
if is_prime(_SCREAMING_SNAKE_CASE ):
primes.append(_SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(_SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    _SCREAMING_SNAKE_CASE = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE )  # remove pegasus newline char (re.sub returns a new string, so reassign it)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
| 294
| 1
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
A : int = '3'
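# NOTE: this bare assignment has no effect on its own; it appears to correspond to
# setting os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" in the upstream environment-dump
# script, which silences TensorFlow's C++ logging before the imports further below run.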
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 6
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[int] = 3_84
__lowercase : str = 7
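    # NOTE: the two values above appear to be the defaults for the auxiliary head input
    # channels (384) and the Swin window size (7); the larger variants below override
    # them with 512/768 channels and a window size of 12.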
if "tiny" in model_name:
__lowercase : List[str] = 96
__lowercase : Any = (2, 2, 6, 2)
__lowercase : Dict = (3, 6, 12, 24)
elif "small" in model_name:
__lowercase : str = 96
__lowercase : Optional[int] = (2, 2, 18, 2)
__lowercase : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
__lowercase : Tuple = 1_28
__lowercase : Tuple = (2, 2, 18, 2)
__lowercase : int = (4, 8, 16, 32)
__lowercase : str = 12
__lowercase : Any = 5_12
elif "large" in model_name:
__lowercase : List[str] = 1_92
__lowercase : List[Any] = (2, 2, 18, 2)
__lowercase : Optional[Any] = (6, 12, 24, 48)
__lowercase : Optional[int] = 12
__lowercase : Optional[Any] = 7_68
# set label information
__lowercase : Any = 1_50
__lowercase : Tuple = '''huggingface/label-files'''
__lowercase : int = '''ade20k-id2label.json'''
__lowercase : Union[str, Any] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowercase : Union[str, Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
__lowercase : Any = SwinConfig(
embed_dim=__UpperCamelCase , depths=__UpperCamelCase , num_heads=__UpperCamelCase , window_size=__UpperCamelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
__lowercase : List[Any] = UperNetConfig(
backbone_config=__UpperCamelCase , auxiliary_in_channels=__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase , )
return config
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : str = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Any = dct.pop(__UpperCamelCase )
__lowercase : Any = val
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase : Dict = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
__lowercase : int = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
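            # the fused qkv matrix has 3 * dim rows: rows [0:dim] form the query projection,
            # [dim:2*dim] the key projection and [2*dim:3*dim] the value projection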
__lowercase : List[Any] = in_proj_weight[:dim, :]
__lowercase : Tuple = in_proj_bias[: dim]
__lowercase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
__lowercase : int = in_proj_bias[
dim : dim * 2
]
__lowercase : str = in_proj_weight[
-dim :, :
]
__lowercase : List[Any] = in_proj_bias[-dim :]
# fmt: on
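# The four helpers below permute patch-merging ("downsample") weight channels with the
# index order [0, 2, 1, 3]: the original checkpoint concatenates the 2x2 patch grid in a
# different order than the Transformers Swin implementation, so reduction and norm
# parameters have to be reordered when converting in either direction.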
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase ,__lowercase : str = x.shape
__lowercase : List[str] = x.reshape(__UpperCamelCase , 4 , in_channel // 4 )
__lowercase : Dict = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__UpperCamelCase , __UpperCamelCase )
return x
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase ,__lowercase : Optional[int] = x.shape
__lowercase : Union[str, Any] = x.reshape(__UpperCamelCase , in_channel // 4 , 4 )
__lowercase : int = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__UpperCamelCase , __UpperCamelCase )
return x
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : int = x.shape[0]
__lowercase : List[str] = x.reshape(4 , in_channel // 4 )
__lowercase : Any = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__UpperCamelCase )
return x
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Union[str, Any] = x.shape[0]
__lowercase : List[str] = x.reshape(in_channel // 4 , 4 )
__lowercase : Union[str, Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__UpperCamelCase )
return x
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
__lowercase : Any = model_name_to_url[model_name]
__lowercase : Any = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' , file_name=__UpperCamelCase )[
'''state_dict'''
]
for name, param in state_dict.items():
print(__UpperCamelCase , param.shape )
__lowercase : Tuple = get_upernet_config(__UpperCamelCase )
__lowercase : List[Any] = UperNetForSemanticSegmentation(__UpperCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__lowercase : Optional[Any] = state_dict.pop(__UpperCamelCase )
if "bn" in key:
__lowercase : List[Any] = key.replace('''bn''' , '''batch_norm''' )
__lowercase : Optional[Any] = val
# rename keys
__lowercase : Tuple = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__lowercase : Optional[Any] = reverse_correct_unfold_reduction_order(__UpperCamelCase )
if "norm" in key:
__lowercase : Optional[Any] = reverse_correct_unfold_norm_order(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# verify on image
__lowercase : Any = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__lowercase : str = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' )
__lowercase : Union[str, Any] = SegformerImageProcessor()
__lowercase : int = processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
__lowercase : List[Any] = model(__UpperCamelCase )
__lowercase : Union[str, Any] = outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__lowercase : Tuple = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
__lowercase : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
__lowercase : Optional[int] = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
__lowercase : Any = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
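    # Example invocation (illustrative; substitute the actual filename of this script):
    #   python <this_script>.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub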
| 249
| 0
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
A = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __lowercase ( _lowerCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase = 101 ):
__a : Optional[int] = length
def __len__( self ):
return self.length
def __getitem__( self , _UpperCAmelCase ):
return i
class __lowercase :
'''simple docstring'''
def __call__( self , _UpperCAmelCase ):
return {"input_ids": torch.tensor(lowercase_ ), "labels": torch.tensor(lowercase_ )}
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__a : int = nn.Linear(120 , 80 )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __lowercase ( _lowerCamelCase ):
'''simple docstring'''
@require_torch_neuroncore
def _lowerCamelCase ( self ):
__a : List[str] = f"""--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split()
__a : List[str] = self.get_auto_remove_tmp_dir()
__a : Tuple = f"""--output_dir {output_dir}""".split()
__a : Optional[Any] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowercase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __lowercase ( _lowerCamelCase ):
'''simple docstring'''
@require_torch_multi_gpu
def _lowerCamelCase ( self ):
__a : int = f"""--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n """.split()
__a : Any = self.get_auto_remove_tmp_dir()
__a : List[str] = f"""--output_dir {output_dir}""".split()
__a : Optional[int] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowercase_ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
A = HfArgumentParser((TrainingArguments,))
A = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
A = DummyDataset(dataset_length)
def __A ( a_ :List[Any]) -> Dict:
__a : List[str] = list(range(len(__UpperCAmelCase)))
__a : Optional[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""")
return {"success": success}
A = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
A = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
A = 2
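        # NOTE: this bare assignment (and the matching `A = None` at the end of the script)
        # appears to stand in for setting and later resetting an eval accumulation option on
        # the trainer in the upstream distributed test; as written it has no effect.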
A = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
A = None
| 356
|
"""simple docstring"""
import os
import string
import sys
A = 1 << 8
A = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
A = KEYMAP['''up''']
A = KEYMAP['''left''']
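# NOTE: in the upstream key-handling module these two values are registered as
# KEYMAP["arrow_begin"] and KEYMAP["arrow_end"], which get_character() looks up further
# below; as bare assignments the dictionary keys are lost.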
if sys.platform == "win32":
A = []
A = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
A = ord(str(i))
def __A ( ) -> Dict:
if os.name == "nt":
import msvcrt
__a : Optional[Any] = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(a_) == 0:
# Read the keystroke
__a : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__a : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__a : Union[str, Any] = chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
WIN_CH_BUFFER.append(a_)
if ord(a_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26))
__a : str = chr(KEYMAP['''esc'''])
except KeyError:
__a : str = cha[1]
else:
__a : Optional[Any] = ch.decode(a_)
else:
__a : Union[str, Any] = WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
__a : Any = sys.stdin.fileno()
__a : List[str] = termios.tcgetattr(a_)
try:
tty.setraw(a_)
__a : int = sys.stdin.read(1)
finally:
termios.tcsetattr(a_ , termios.TCSADRAIN , a_)
return ch
def __A ( ) -> str:
__a : Any = get_raw_chars()
if ord(a_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(a_) == KEYMAP["esc"]:
__a : str = get_raw_chars()
if ord(a_) == KEYMAP["mod_int"]:
__a : List[str] = get_raw_chars()
if ord(a_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(a_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(a_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 188
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a__ ( unittest.TestCase ):
lowerCamelCase : Optional[int] =MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase : int =TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
__lowerCamelCase = text_generator('''This is a test''' , do_sample=a )
self.assertEqual(
a , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
__lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
a , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
__lowerCamelCase = text_generator('''This is a test''' , do_sample=a , num_return_sequences=2 , return_tensors=a )
self.assertEqual(
a , [
{'''generated_token_ids''': ANY(a )},
{'''generated_token_ids''': ANY(a )},
] , )
__lowerCamelCase = text_generator.model.config.eos_token_id
__lowerCamelCase = '''<pad>'''
__lowerCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'''generated_token_ids''': ANY(a )},
{'''generated_token_ids''': ANY(a )},
],
[
{'''generated_token_ids''': ANY(a )},
{'''generated_token_ids''': ANY(a )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
__lowerCamelCase = text_generator('''This is a test''' , do_sample=a )
self.assertEqual(
a , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
__lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=a )
self.assertEqual(
a , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Dict , a : int , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = TextGenerationPipeline(model=a , tokenizer=a )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = '''Hello I believe in'''
__lowerCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
__lowerCamelCase = text_generator(a )
self.assertEqual(
a , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
__lowerCamelCase = text_generator(a , stop_sequence=''' fe''' )
self.assertEqual(a , [{'''generated_text''': '''Hello I believe in fe'''}] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : List[str] , a : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = text_generator.model
__lowerCamelCase = text_generator.tokenizer
__lowerCamelCase = text_generator('''This is a test''' )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__lowerCamelCase = text_generator('''This is a test''' , return_full_text=a )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__lowerCamelCase = pipeline(task='''text-generation''' , model=a , tokenizer=a , return_full_text=a )
__lowerCamelCase = text_generator('''This is a test''' )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
__lowerCamelCase = text_generator('''This is a test''' , return_full_text=a )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
__lowerCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a )
self.assertEqual(
a , [
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__lowerCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a )
self.assertEqual(
a , [
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
[{'''generated_text''': ANY(a )}, {'''generated_text''': ANY(a )}],
] , )
with self.assertRaises(a ):
__lowerCamelCase = text_generator('''test''' , return_full_text=a , return_text=a )
with self.assertRaises(a ):
__lowerCamelCase = text_generator('''test''' , return_full_text=a , return_tensors=a )
with self.assertRaises(a ):
__lowerCamelCase = text_generator('''test''' , return_text=a , return_tensors=a )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__lowerCamelCase = text_generator('''''' )
self.assertEqual(a , [{'''generated_text''': ANY(a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__lowerCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__lowerCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
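            # the "hole" strategy truncates the prompt from the left so that the requested
            # max_new_tokens still fit within the model's maximum context length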
__lowerCamelCase = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
__lowerCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
import torch
__lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
import torch
__lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=a , top_p=0.5 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = '''Hello world'''
__lowerCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
__lowerCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
__lowerCamelCase = logging.get_logger('''transformers.generation.utils''' )
        __lowerCamelCase = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a ) as cl:
__lowerCamelCase = text_generator(a , max_length=10 , max_new_tokens=1 )
self.assertIn(a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a ) as cl:
__lowerCamelCase = text_generator(a , max_new_tokens=1 )
self.assertNotIn(a , cl.out )
with CaptureLogger(a ) as cl:
__lowerCamelCase = text_generator(a , max_length=10 )
self.assertNotIn(a , cl.out )
| 67
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __A :
a__ : str = LEDConfig
a__ : Any = {}
a__ : Any = """gelu"""
def __init__(self : Tuple , __a : Union[str, Any] , __a : List[str]=13 , __a : Union[str, Any]=7 , __a : Optional[int]=True , __a : Dict=False , __a : Dict=99 , __a : List[str]=32 , __a : List[Any]=2 , __a : Any=4 , __a : Any=37 , __a : int=0.1 , __a : Tuple=0.1 , __a : Optional[int]=20 , __a : str=2 , __a : Union[str, Any]=1 , __a : Dict=0 , __a : Optional[int]=4 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
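        # e.g. with the defaults seq_length=7 and attention_window=4, the encoder sequence
        # length is padded up to 8 (the next multiple of the attention window)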
def _lowercase (self : Any ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase_ = prepare_led_inputs_dict(__a , __a , __a )
UpperCAmelCase_ = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
UpperCAmelCase_ = global_attention_mask
return config, inputs_dict
def _lowercase (self : List[str] , __a : str , __a : Optional[int] ):
UpperCAmelCase_ = TFLEDModel(config=__a ).get_decoder()
UpperCAmelCase_ = inputs_dict["input_ids"]
UpperCAmelCase_ = input_ids[:1, :]
UpperCAmelCase_ = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ = 1
# first forward pass
UpperCAmelCase_ = model(__a , attention_mask=__a , use_cache=__a )
UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ = model(__a , attention_mask=__a )[0]
UpperCAmelCase_ = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1E-3 )
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=None , snake_case_ : List[str]=None , snake_case_ : Tuple=None , ) -> str:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ = tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
UpperCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : List[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
a__ : Any = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
a__ : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ : Any = True
a__ : str = False
a__ : Optional[Any] = False
a__ : int = False
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = TFLEDModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a )
def _lowercase (self : str ):
self.config_tester.run_common_tests()
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = tf.zeros_like(inputs_dict["attention_mask"] )
UpperCAmelCase_ = 2
UpperCAmelCase_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
UpperCAmelCase_ = True
UpperCAmelCase_ = self.model_tester.seq_length
UpperCAmelCase_ = self.model_tester.encoder_seq_length
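# Local helpers for the attention-output test below: check the count and shapes of the
# decoder attentions, encoder attentions and encoder global attentions.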
def check_decoder_attentions_output(__a : Optional[int] ):
UpperCAmelCase_ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Dict ):
UpperCAmelCase_ = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _lowercase (self : str ):
pass
def _lowercase (self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def lowerCAmelCase_ ( snake_case_ : Dict ) -> str:
'''simple docstring'''
return tf.constant(snake_case_ , dtype=tf.int32 )
SCREAMING_SNAKE_CASE_: float = 1E-4
@slow
@require_tf
class __A ( unittest.TestCase ):
def _lowercase (self : str ):
UpperCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
UpperCAmelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase_ = prepare_led_inputs_dict(model.config , __a , __a )
UpperCAmelCase_ = model(**__a )[0]
UpperCAmelCase_ = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
UpperCAmelCase_ = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 )
def _lowercase (self : Any ):
UpperCAmelCase_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
UpperCAmelCase_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase_ = prepare_led_inputs_dict(model.config , __a , __a )
UpperCAmelCase_ = model(**__a )[0]
UpperCAmelCase_ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
UpperCAmelCase_ = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 , rtol=1E-3 )
| 106
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCAmelCase_ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
UpperCAmelCase_ = model(__a )["last_hidden_state"]
UpperCAmelCase_ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __a )
# compare the actual values for a slice.
UpperCAmelCase_ = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 106
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase__ : Any = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase__ : int = {
"""gpt-neox-20b""": 2_048,
}
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] =VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any =["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Any="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : Tuple="<|endoftext|>" , UpperCAmelCase__ : Optional[Any]=False , **UpperCAmelCase__ : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , **UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Dict = getattr(UpperCAmelCase__ , pre_tok_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
SCREAMING_SNAKE_CASE : int = pre_tok_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
def _lowercase ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def _lowercase ( self : Dict , UpperCAmelCase__ : "Conversation" ) ->List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] )
if len(UpperCAmelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
| 245
|
import sys
from collections import defaultdict
class a__ :
"""simple docstring"""
def __init__( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
def _lowercase ( self : str , UpperCAmelCase__ : Optional[int] ) ->str:
"""simple docstring"""
return self.node_position[vertex]
def _lowercase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = pos
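# Sift-down: swap the node at `start` with its smaller child until the min-heap
# property holds for that subtree, recursing via top_to_bottom.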
def _lowercase ( self : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) ->int:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE : str = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE : Tuple = 2 * start + 1
else:
SCREAMING_SNAKE_CASE : str = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = temp, tempa
SCREAMING_SNAKE_CASE : int = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCAmelCase__ )
self.top_to_bottom(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
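# Sift-up: move the value at `index` toward the root while it is smaller than its
# parent, keeping the node-position bookkeeping in sync.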
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = position[index]
while index != 0:
SCREAMING_SNAKE_CASE : Optional[int] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE : str = heap[parent]
SCREAMING_SNAKE_CASE : Any = position[parent]
self.set_position(position[parent] , UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = val
SCREAMING_SNAKE_CASE : Optional[Any] = temp
self.set_position(UpperCAmelCase__ , UpperCAmelCase__ )
break
SCREAMING_SNAKE_CASE : List[Any] = parent
else:
SCREAMING_SNAKE_CASE : List[Any] = val
SCREAMING_SNAKE_CASE : List[Any] = temp
self.set_position(UpperCAmelCase__ , 0 )
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = len(UpperCAmelCase__ ) // 2 - 1
for i in range(UpperCAmelCase__ , -1 , -1 ):
self.top_to_bottom(UpperCAmelCase__ , UpperCAmelCase__ , len(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = positions[0]
SCREAMING_SNAKE_CASE : Optional[int] = sys.maxsize
self.top_to_bottom(UpperCAmelCase__ , 0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ )
return temp
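# Prim's minimum spanning tree: start from vertex 0 and repeatedly pull the closest
# unvisited vertex from the min-heap, relaxing its neighbours' distances.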
def __lowercase ( _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = Heap()
SCREAMING_SNAKE_CASE : List[Any] = [0] * len(_A )
SCREAMING_SNAKE_CASE : List[str] = [-1] * len(_A ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE : int = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE : List[Any] = []
for vertex in range(len(_A ) ):
distance_tv.append(sys.maxsize )
positions.append(_A )
heap.node_position.append(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : Optional[Any] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Dict = distance
heap.heapify(_A , _A )
for _ in range(1 , len(_A ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = heap.delete_minimum(_A , _A )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE : int = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_A )]
):
SCREAMING_SNAKE_CASE : List[Any] = distance
heap.bottom_to_top(
_A , heap.get_position(_A ) , _A , _A )
SCREAMING_SNAKE_CASE : Dict = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Union[str, Any] = int(input("""Enter number of edges: """).strip())
UpperCAmelCase__ : Union[str, Any] = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 245
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 9
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __magic_name__( ):
__lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)]
__lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0)
return (arr, r)
_UpperCAmelCase : Dict = make_dataset()
def __magic_name__( lowerCamelCase, lowerCamelCase):
for triplet in permutations(lowerCamelCase, 3):
if sum(lowerCamelCase) == target:
return tuple(sorted(lowerCamelCase))
return (0, 0, 0)
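# Optimised variant: sort the array, then for each i scan with two pointers (left/right)
# for a pair completing the target sum; O(n^2) instead of the O(n^3) brute force above.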
def __magic_name__( lowerCamelCase, lowerCamelCase):
arr.sort()
__lowerCAmelCase = len(lowerCamelCase)
for i in range(n - 1):
__lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __magic_name__( ):
__lowerCAmelCase = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
__lowerCAmelCase = '''
triplet_sum1(*dataset)
'''
__lowerCAmelCase = '''
triplet_sum2(*dataset)
'''
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
__lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0)
return (min(lowerCamelCase), min(lowerCamelCase))
if __name__ == "__main__":
from doctest import testmod
testmod()
_UpperCAmelCase : Union[str, Any] = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 9
| 1
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __magic_name__ ( enum.Enum ):
'''simple docstring'''
lowerCamelCase__ : int = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Optional[Any] = 2
@add_end_docstrings(lowerCamelCase__ )
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
super().__init__(*lowercase_, **lowercase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a__ =None
if self.model.config.prefix is not None:
a__ =self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a__ =self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a__, a__, a__ =self._sanitize_parameters(prefix=lowercase_, **self._forward_params )
a__ ={**self._preprocess_params, **preprocess_params}
a__ ={**self._forward_params, **forward_params}
def _UpperCAmelCase ( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_, ) -> List[Any]:
"""simple docstring"""
a__ ={}
if prefix is not None:
a__ =prefix
if prefix:
a__ =self.tokenizer(
lowercase_, padding=lowercase_, add_special_tokens=lowercase_, return_tensors=self.framework )
a__ =prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
a__ =handle_long_generation
preprocess_params.update(lowercase_ )
a__ =generate_kwargs
a__ ={}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
a__ =ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
a__ =ReturnType.TENSORS
if return_type is not None:
a__ =return_type
if clean_up_tokenization_spaces is not None:
a__ =clean_up_tokenization_spaces
if stop_sequence is not None:
a__ =self.tokenizer.encode(lowercase_, add_special_tokens=lowercase_ )
if len(lowercase_ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
a__ =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*lowercase_, **lowercase_ )
def __call__( self, lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(lowercase_, **lowercase_ )
def _UpperCAmelCase ( self, lowercase_, lowercase_="", lowercase_=None, **lowercase_ ) -> int:
"""simple docstring"""
a__ =self.tokenizer(
prefix + prompt_text, padding=lowercase_, add_special_tokens=lowercase_, return_tensors=self.framework )
a__ =prompt_text
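# "hole" handling: when the prompt plus the requested new tokens would exceed the
# tokenizer's model_max_length, keep only the rightmost slice of the prompt.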
if handle_long_generation == "hole":
a__ =inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
a__ =generate_kwargs['''max_new_tokens''']
else:
a__ =generate_kwargs.get('''max_length''', self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a__ =self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
a__ =inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
a__ =inputs['''attention_mask'''][:, -keep_length:]
return inputs
def _UpperCAmelCase ( self, lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
a__ =model_inputs['''input_ids''']
a__ =model_inputs.get('''attention_mask''', lowercase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
a__ =None
a__ =None
a__ =1
else:
a__ =input_ids.shape[0]
a__ =model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a__ =generate_kwargs.pop('''prefix_length''', 0 )
if prefix_length > 0:
a__ ='''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
a__ =generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a__ ='''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a__ =self.model.generate(input_ids=lowercase_, attention_mask=lowercase_, **lowercase_ )
a__ =generated_sequence.shape[0]
if self.framework == "pt":
a__ =generated_sequence.reshape(lowercase_, out_b // in_b, *generated_sequence.shape[1:] )
elif self.framework == "tf":
a__ =tf.reshape(lowercase_, (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _UpperCAmelCase ( self, lowercase_, lowercase_=ReturnType.FULL_TEXT, lowercase_=True ) -> Optional[int]:
"""simple docstring"""
a__ =model_outputs['''generated_sequence'''][0]
a__ =model_outputs['''input_ids''']
a__ =model_outputs['''prompt_text''']
a__ =generated_sequence.numpy().tolist()
a__ =[]
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a__ ={'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a__ =self.tokenizer.decode(
lowercase_, skip_special_tokens=lowercase_, clean_up_tokenization_spaces=lowercase_, )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a__ =0
else:
a__ =len(
self.tokenizer.decode(
input_ids[0], skip_special_tokens=lowercase_, clean_up_tokenization_spaces=lowercase_, ) )
if return_type == ReturnType.FULL_TEXT:
a__ =prompt_text + text[prompt_length:]
else:
a__ =text[prompt_length:]
a__ ={'''generated_text''': all_text}
records.append(lowercase_ )
return records
| 188
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=2, lowercase_=99, lowercase_=0, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=12, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_="last", lowercase_=None, lowercase_=None, ) -> List[Any]:
"""simple docstring"""
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_lengths
a__ =use_token_type_ids
a__ =use_labels
a__ =gelu_activation
a__ =sinusoidal_embeddings
a__ =causal
a__ =asm
a__ =n_langs
a__ =vocab_size
a__ =n_special
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =summary_type
a__ =use_proj
a__ =scope
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_input_lengths:
a__ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], 2 ).float()
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, lengths=lowercase_, langs=lowercase_ )
a__ =model(lowercase_, langs=lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> str:
"""simple docstring"""
a__ =FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, p_mask=lowercase_, )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, )
((a__), ) =result_with_labels.to_tuple()
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
((a__), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[int]:
"""simple docstring"""
a__ =self.num_labels
a__ =FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =self.num_choices
a__ =FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =model(
lowercase_, attention_mask=lowercase_, token_type_ids=lowercase_, labels=lowercase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
(
(
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
),
) =config_and_inputs
a__ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> str:
"""simple docstring"""
a__ =super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =FlaubertModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, emb_dim=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a__ =True
a__ =model_class(config=lowercase_ )
a__ =self._prepare_for_class(lowercase_, lowercase_ )
a__ =torch.jit.trace(
lowercase_, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_, os.path.join(lowercase_, '''traced_model.pt''' ) )
a__ =torch.jit.load(os.path.join(lowercase_, '''traced_model.pt''' ), map_location=lowercase_ )
loaded(inputs_dict['''input_ids'''].to(lowercase_ ), inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
a__ =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
a__ =model(lowercase_ )[0]
a__ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowercase_ )
a__ =torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
| 188
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowercase: Optional[Any] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
__lowercase: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( a_, a_, a_, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = AltDiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
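# Builds a miniature UNet / DDIM scheduler / VAE / CLIP text encoder stack so the
# AltDiffusion pipeline tests can run quickly without full checkpoints.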
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0)
__lowerCAmelCase: str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
__lowerCAmelCase: Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0)
__lowerCAmelCase: Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0)
__lowerCAmelCase: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
__lowerCAmelCase: Optional[int] = CLIPTextModel(_lowercase)
__lowerCAmelCase: Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
__lowerCAmelCase: int = 7_7
__lowerCAmelCase: Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]=0)-> Any:
'''simple docstring'''
if str(_lowercase).startswith("mps"):
__lowerCAmelCase: Tuple = torch.manual_seed(_lowercase)
else:
__lowerCAmelCase: Tuple = torch.Generator(device=_lowercase).manual_seed(_lowercase)
__lowerCAmelCase: Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase_ ( self : Optional[int])-> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
def lowercase_ ( self : Union[str, Any])-> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: int = self.get_dummy_components()
torch.manual_seed(0)
__lowerCAmelCase: List[Any] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase: Any = RobertaSeriesModelWithTransformation(_lowercase)
__lowerCAmelCase: List[str] = text_encoder
__lowerCAmelCase: Any = AltDiffusionPipeline(**_lowercase)
__lowerCAmelCase: Optional[int] = alt_pipe.to(_lowercase)
alt_pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase: int = self.get_dummy_inputs(_lowercase)
__lowerCAmelCase: Optional[int] = """A photo of an astronaut"""
__lowerCAmelCase: List[Any] = alt_pipe(**_lowercase)
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCAmelCase: Optional[int] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: Optional[Any] = self.get_dummy_components()
__lowerCAmelCase: str = PNDMScheduler(skip_prk_steps=_lowercase)
torch.manual_seed(0)
__lowerCAmelCase: int = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase: Dict = RobertaSeriesModelWithTransformation(_lowercase)
__lowerCAmelCase: Tuple = text_encoder
__lowerCAmelCase: Tuple = AltDiffusionPipeline(**_lowercase)
__lowerCAmelCase: Dict = alt_pipe.to(_lowercase)
alt_pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase: int = self.get_dummy_inputs(_lowercase)
__lowerCAmelCase: List[str] = alt_pipe(**_lowercase)
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCAmelCase: Union[str, Any] = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Dict)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=_lowercase)
__lowerCAmelCase: Dict = alt_pipe.to(_lowercase)
alt_pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase: List[Any] = """A painting of a squirrel eating a burger"""
__lowerCAmelCase: str = torch.manual_seed(0)
__lowerCAmelCase: int = alt_pipe([prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np")
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: List[Any] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self : Any)-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler")
__lowerCAmelCase: str = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=_lowercase , safety_checker=_lowercase)
__lowerCAmelCase: Tuple = alt_pipe.to(_lowercase)
alt_pipe.set_progress_bar_config(disable=_lowercase)
__lowerCAmelCase: Optional[Any] = """A painting of a squirrel eating a burger"""
__lowerCAmelCase: Union[str, Any] = torch.manual_seed(0)
__lowerCAmelCase: Union[str, Any] = alt_pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="numpy")
__lowerCAmelCase: Any = output.images
__lowerCAmelCase: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase: Union[str, Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 217
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Tuple = logging.getLogger()
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : int = argparse.ArgumentParser()
parser.add_argument("""-f""")
a_ : Any = parser.parse_args()
return args.f
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
a_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Dict:
a_ : List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[int] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_lowercase , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_lowercase )
a_ : Tuple = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
a_ : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_lowercase )
| 248
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: str , **UpperCamelCase_: int ):
super().__init__(**UpperCamelCase_ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self , """vision""" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image: Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels: Union[str, List[str]] = None , **kwargs , ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("""text_queries""" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"""image""": image, """candidate_labels""": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def lowerCAmelCase__ ( self: List[str] , **kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["""threshold"""] = kwargs["""threshold"""]
        if "top_k" in kwargs:
            postprocess_params["""top_k"""] = kwargs["""top_k"""]
        return {}, {}, postprocess_params
    def lowerCAmelCase__ ( self: Any , inputs: Dict[str, Any] ):
        image = load_image(inputs["""image"""] )
        candidate_labels = inputs["""candidate_labels"""]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(""",""" )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def lowerCAmelCase__ ( self: List[Any] , model_inputs: Dict[str, Any] ):
        target_size = model_inputs.pop("""target_size""" )
        candidate_label = model_inputs.pop("""candidate_label""" )
        is_last = model_inputs.pop("""is_last""" )
        outputs = self.model(**model_inputs )
        model_outputs = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
        return model_outputs
    def lowerCAmelCase__ ( self: List[Any] , model_outputs: Any , threshold: float = 0.1 , top_k: int = None ):
        results = []
        for model_output in model_outputs:
            label = model_output["""candidate_label"""]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["""target_size"""] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["""scores"""][index].item()
                box = self._get_bounding_box(outputs["""boxes"""][index][0] )
                result = {"""score""": score, """label""": label, """box""": box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 29
| 1
|
def topological_sort( graph ) -> None:
'''simple docstring'''
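    # Kahn's algorithm: count indegrees, repeatedly pop zero-indegree vertices from a FIFO queue,
    # and report a cycle if not every vertex gets visited.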
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('''Cycle exists''' )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 273
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : Dict = logging.get_logger(__name__)
__A : str = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class A_ (PretrainedConfig ):
UpperCAmelCase__ = '''longformer'''
    def __init__( self , attention_window = 5_1_2 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0_5_2_2 , hidden_size = 7_6_8 , num_hidden_layers = 1_2 , num_attention_heads = 1_2 , intermediate_size = 3_0_7_2 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_1_2 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class A_ (OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ):
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def _lowercase ( self ):
'''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs["""pooler_output"""] = {0: '''batch'''}
        return outputs
@property
def _lowercase ( self ):
'''simple docstring'''
return 1E-4
@property
def _lowercase ( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
    def _lowercase ( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
        return inputs
| 273
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCamelCase ( model : Dict ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
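    # A tiny LoRA-style adapter used only in these tests: the wrapped linear layer stays frozen
    # while a trainable low-rank down/up projection is added to its output.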
    def __init__( self : Optional[int] , module : nn.Module , rank : int ):
        """simple docstring"""
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , input : List[str] , *args : Optional[Any] , **kwargs : Optional[int] ):
        """simple docstring"""
        return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = """bigscience/bloom-1b7"""
# Constant values
lowerCAmelCase_ : Optional[int] = 2.1_09_65_95_52_69_25_74
lowerCAmelCase_ : int = """Hello my name is"""
lowerCAmelCase_ : Any = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
lowerCAmelCase_ : int = 10
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.float16 , device_map="""auto""" )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="""auto""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , """quantization_config""" ) )
UpperCAmelCase__ = config.to_dict()
UpperCAmelCase__ = config.to_diff_dict()
UpperCAmelCase__ = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
UpperCAmelCase__ = self.model_fpaa.get_memory_footprint()
UpperCAmelCase__ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCAmelCase__ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
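        # Outside the LM head and the modules kept in full precision, every linear layer should now
        # hold 4-bit weights packed into uint8 storage.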
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCAmelCase__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = BitsAndBytesConfig()
UpperCAmelCase__ = True
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map="""auto""" )
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCAmelCase__ = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" )
UpperCAmelCase__ = self.model_fpaa.to(torch.floataa )
UpperCAmelCase__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
UpperCAmelCase__ = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
UpperCAmelCase__ = self.model_fpaa.half()
# Check this does not throw an error
UpperCAmelCase__ = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = """t5-small"""
UpperCAmelCase__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
UpperCAmelCase__ = AutoTokenizer.from_pretrained(cls.model_name )
UpperCAmelCase__ = """Translate in German: Hello, my dog is cute"""
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
UpperCAmelCase__ = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCAmelCase__ = None
# test with `t5-small`
UpperCAmelCase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCAmelCase__ = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
UpperCAmelCase__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCAmelCase__ = model.generate(**_UpperCAmelCase )
UpperCAmelCase__ = modules
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCAmelCase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCAmelCase__ = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
UpperCAmelCase__ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
UpperCAmelCase__ = model.generate(**_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# model_name
UpperCAmelCase__ = """bigscience/bloom-560m"""
UpperCAmelCase__ = """t5-small"""
# Different types of model
UpperCAmelCase__ = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
# Sequence classification model
UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
# CausalLM model
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
# Seq2seq model
UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map="""auto""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCAmelCase__ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCAmelCase__ = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
UpperCAmelCase__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = """facebook/opt-350m"""
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCAmelCase__ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCAmelCase__ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
UpperCAmelCase__ = LoRALayer(module.q_proj , rank=16 )
UpperCAmelCase__ = LoRALayer(module.k_proj , rank=16 )
UpperCAmelCase__ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
UpperCAmelCase__ = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCAmelCase__ = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Any = """gpt2-xl"""
lowerCAmelCase_ : Optional[Any] = 3.31_91_85_48_54_15_21_87
| 61
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = 'Hello world! cécé herlolip'
UpperCAmelCase_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints( SCREAMING_SNAKE_CASE__ : str , pytorch_dump_folder_path : str ):
'''simple docstring'''
UpperCAmelCase__ = BertAbsConfig(
temp_dir=""".""" , finetune_bert=SCREAMING_SNAKE_CASE__ , large=SCREAMING_SNAKE_CASE__ , share_emb=SCREAMING_SNAKE_CASE__ , use_bert_emb=SCREAMING_SNAKE_CASE__ , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , lambda storage , loc : storage )
UpperCAmelCase__ = AbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) , SCREAMING_SNAKE_CASE__ )
original.eval()
UpperCAmelCase__ = BertAbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
UpperCAmelCase__ = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ = encoder_input_ids
UpperCAmelCase__ = decoder_input_ids
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ = original(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = original.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = new_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = new_model.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
UpperCAmelCase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 61
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=13 , _lowerCamelCase : int=7 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=99 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : Optional[Any]=5 , _lowerCamelCase : Tuple=4 , _lowerCamelCase : Dict=37 , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : str=0.1 , _lowerCamelCase : List[Any]=128 , _lowerCamelCase : Any=32 , _lowerCamelCase : Any=16 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Union[str, Any]=0.0_2 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Tuple=4 , _lowerCamelCase : Tuple=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowercase ( self : List[str] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Dict ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def lowercase ( self : Optional[int] ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] ):
_snake_case = NezhaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , ):
_snake_case = True
_snake_case = NezhaModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ):
_snake_case = NezhaForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
_snake_case = NezhaForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple ):
_snake_case = NezhaForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : int ):
_snake_case = NezhaForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ):
_snake_case = self.num_labels
_snake_case = NezhaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ):
_snake_case = self.num_labels
_snake_case = NezhaForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
_snake_case = self.num_choices
_snake_case = NezhaForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
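        # Multiple-choice models expect (batch_size, num_choices, seq_length) inputs, so repeat every
        # tensor across a new choice dimension.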
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( A_ , A_ , A_ , unittest.TestCase ):
__a = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__a = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a = True
def lowercase ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]=False ):
_snake_case = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
_snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
_snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def lowercase ( self : Dict ):
_snake_case = NezhaModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def lowercase ( self : Tuple ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def lowercase ( self : str ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def lowercase ( self : Optional[int] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCamelCase )
def lowercase ( self : Any ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def lowercase ( self : str ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def lowercase ( self : int ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def lowercase ( self : int ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = NezhaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_snake_case = True
_snake_case = model_class(config=_lowerCamelCase )
_snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
_snake_case = torch.jit.trace(
_lowerCamelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , '''bert.pt''' ) )
_snake_case = torch.jit.load(os.path.join(_lowerCamelCase , '''bert.pt''' ) , map_location=_lowerCamelCase )
loaded(inputs_dict['''input_ids'''].to(_lowerCamelCase ) , inputs_dict['''attention_mask'''].to(_lowerCamelCase ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowercase ( self : List[str] ):
_snake_case = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
_snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_snake_case = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
_snake_case = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
@slow
def lowercase ( self : Tuple ):
_snake_case = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
_snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_snake_case = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
_snake_case = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
| 288
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt'
UpperCAmelCase__ = '"text": ["foo", "foo"]'
UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class lowerCAmelCase__ :
__a = 200
__a = {"""Content-Length""": """100"""}
__a = {}
def lowercase ( self : List[str] , **_lowerCamelCase : List[str] ):
return [bytes(_lowerCamelCase , '''utf-8''' )]
def _UpperCAmelCase ( *args : List[str] , **kwargs : Dict ) -> Dict:
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
import requests
monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase )
_snake_case = URL
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = url
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [url]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': url}
_snake_case = '''dummy'''
_snake_case = '''downloads'''
_snake_case = tmp_path
_snake_case = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.download(__lowerCamelCase )
_snake_case = urls
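    # Normalize the str/list/dict cases into parallel lists so the same assertions below cover every input type.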
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [downloaded_paths]
_snake_case = [urls]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
_snake_case = downloaded_paths.values()
_snake_case = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case = Path(__lowerCamelCase )
_snake_case = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case = downloaded_path.read_text()
assert content == CONTENT
_snake_case = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_snake_case = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int:
_snake_case = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': filename}
_snake_case = '''dummy'''
_snake_case = xz_file.parent
_snake_case = '''extracted'''
_snake_case = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.extract(__lowerCamelCase )
_snake_case = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [extracted_paths]
_snake_case = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
_snake_case = extracted_paths.values()
_snake_case = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case = Path(__lowerCamelCase )
_snake_case = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case = extracted_path.read_text()
_snake_case = text_file.read_text()
assert extracted_file_content == expected_file_content
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
_snake_case = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]:
_snake_case = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 288
| 1
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case : List[Any] = logging.getLogger(__name__)
class A ( TokenClassificationTask ):
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any]=-1 ) -> List[Any]:
"""simple docstring"""
        self.label_idx = lowerCAmelCase_
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = mode.value
_a = os.path.join(lowerCAmelCase_ , F'{mode}.txt' )
_a = 1
_a = []
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f:
_a = []
_a = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
guid_index += 1
_a = []
_a = []
else:
_a = line.split(''' ''' )
words.append(splits[0] )
if len(lowerCAmelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
return examples
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> Dict:
"""simple docstring"""
        example_id = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(lowerCAmelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(lowerCAmelCase_ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
if path:
            with open(lowerCAmelCase_ , '''r''' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A ( _a ):
def __init__( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().__init__(label_idx=-2 )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
if path:
            with open(lowerCAmelCase_ , '''r''' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A ( TokenClassificationTask ):
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = mode.value
_a = os.path.join(lowerCAmelCase_ , F'{mode}.txt' )
_a = 1
_a = []
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(lowerCAmelCase_ ):
_a = []
_a = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
guid_index += 1
return examples
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> Dict:
"""simple docstring"""
        example_id = 0
for sentence in parse_incr(lowerCAmelCase_ ):
            s_p = preds_list[example_id]
            out = ''''''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowerCAmelCase_ )
example_id += 1
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
if path:
with open(lowerCAmelCase_ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
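# Illustrative usage sketch (editor addition; the class and argument names are
# assumptions, since the task classes above carry obfuscated names and `Split` is
# defined elsewhere in this module):
#
#   task = POS()                                       # the part-of-speech task above
#   labels = task.get_labels("")                       # falls back to the UPOS tag set
#   examples = task.read_examples_from_file("data/", Split.train)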
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
def __init__( self : Tuple , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
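# Note (editor sketch): dummy classes like the one above are import-time placeholders.
# When `torch` or `scipy` is missing, constructing the object or calling its classmethods
# routes through `requires_backends`, which raises an ImportError telling the user which
# packages to install instead of failing with an opaque NameError.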
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list into a dict of neighbours per node."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node

        first_solution = []
        visiting = start_node
        distance_of_first_solution = 0

        while visiting not in first_solution:
            minim = 10000
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]

            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node

        first_solution.append(end_node)

        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1

        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 10000
        )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return all 2-swap neighbours of ``solution``, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for ``iters`` iterations with a tabu list of length ``size``."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
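# Illustrative usage sketch (editor addition; the file name is hypothetical). The input
# file is expected to hold one edge per line as "node neighbour distance", e.g. "a b 20":
#
#   neighbours = generate_neighbours("tsp_graph.txt")
#   first_solution, distance = generate_first_solution("tsp_graph.txt", neighbours)
#   best_solution, best_cost = tabu_search(first_solution, distance, neighbours, 5, 3)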
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
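# Note (editor sketch): with this lazy-module pattern, importing the package stays cheap;
# the torch/TF/Flax model classes listed in `_import_structure` are only imported the
# first time one of them (e.g. `MBartForConditionalGeneration`) is actually accessed.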
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a plain-text message to a Slack incoming webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = """instructblip_vision_model"""
def __init__( self , lowerCAmelCase__=1_4_0_8 , lowerCAmelCase__=6_1_4_4 , lowerCAmelCase__=3_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1E-6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
a__ : Tuple =hidden_size
a__ : Any =intermediate_size
a__ : Union[str, Any] =num_hidden_layers
a__ : Optional[Any] =num_attention_heads
a__ : List[str] =patch_size
a__ : int =image_size
a__ : Tuple =initializer_range
a__ : Any =attention_dropout
a__ : List[Any] =layer_norm_eps
a__ : Optional[Any] =hidden_act
a__ : Optional[Any] =qkv_bias
@classmethod
def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCAmelCase__ )
a__ , a__ : Optional[Any] =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
a__ : Any =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = """instructblip_qformer"""
def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=2 , lowerCAmelCase__=1_4_0_8 , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Optional[int] =vocab_size
a__ : Optional[Any] =hidden_size
a__ : str =num_hidden_layers
a__ : Optional[int] =num_attention_heads
a__ : Dict =hidden_act
a__ : Optional[int] =intermediate_size
a__ : Union[str, Any] =hidden_dropout_prob
a__ : Optional[int] =attention_probs_dropout_prob
a__ : List[Any] =max_position_embeddings
a__ : Union[str, Any] =initializer_range
a__ : Optional[int] =layer_norm_eps
a__ : int =position_embedding_type
a__ : int =cross_attention_frequency
a__ : Tuple =encoder_hidden_size
@classmethod
def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCAmelCase__ )
a__ , a__ : str =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
a__ : Optional[int] =config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Dict = """instructblip"""
_lowercase : List[Any] = True
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=3_2 , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
a__ : List[Any] ={}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
a__ : Tuple ={}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
a__ : Dict ={}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
a__ : Dict =InstructBlipVisionConfig(**lowerCAmelCase__ )
a__ : Union[str, Any] =InstructBlipQFormerConfig(**lowerCAmelCase__ )
a__ : Tuple =text_config["model_type"] if "model_type" in text_config else "opt"
a__ : List[str] =CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
a__ : Union[str, Any] =self.text_config.tie_word_embeddings
a__ : Optional[Any] =self.text_config.is_encoder_decoder
a__ : str =num_query_tokens
a__ : List[Any] =self.vision_config.hidden_size
a__ : str =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
a__ : List[Any] =1.0
a__ : List[str] =0.02
@classmethod
def _lowercase ( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : int =copy.deepcopy(self.__dict__ )
a__ : int =self.vision_config.to_dict()
a__ : str =self.qformer_config.to_dict()
a__ : str =self.text_config.to_dict()
a__ : List[str] =self.__class__.model_type
return output
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum total cost to travel on every day listed in ``days``,
    given ``costs`` for 1-day, 7-day and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
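# Worked example (editor addition): with travel days [1, 4, 6, 7, 8, 20] and pass costs
# [2, 7, 15] for 1-, 7- and 30-day tickets, the cheapest plan is a 1-day pass on day 1,
# a 7-day pass covering days 4-10 and a 1-day pass on day 20:
#
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11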
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
UpperCamelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
UpperCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """Load an image from ``path`` and convert it to RGB."""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class _a :
'''simple docstring'''
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''A folder containing the training data.'''} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''A folder containing the validation data.'''} )
A : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class _a :
'''simple docstring'''
A : str = field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE )} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
A : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
A : str = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Name or path of preprocessor config.'''} )
A : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def collate_fn(examples):
    """Stack pixel values and labels from a list of examples into batch tensors."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' ,__UpperCamelCase ,__UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE : Any = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task='image-classification' ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(data_args.train_dir ,'**' )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(data_args.validation_dir ,'**' )
SCREAMING_SNAKE_CASE : str = load_dataset(
'imagefolder' ,data_files=__UpperCamelCase ,cache_dir=model_args.cache_dir ,task='image-classification' ,)
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Tuple = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,__UpperCamelCase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : int = dataset['train'].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE : Optional[int] = split['train']
SCREAMING_SNAKE_CASE : int = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE : int = dataset['train'].features['labels'].names
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = {}, {}
for i, label in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = str(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE : Any = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCamelCase: Dict ):
return metric.compute(predictions=np.argmax(p.predictions ,axis=1 ) ,references=p.label_ids )
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(__UpperCamelCase ) ,labelaid=__UpperCamelCase ,idalabel=__UpperCamelCase ,finetuning_task='image-classification' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : List[Any] = (image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE : Dict = Normalize(mean=image_processor.image_mean ,std=image_processor.image_std )
SCREAMING_SNAKE_CASE : Dict = Compose(
[
RandomResizedCrop(__UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : List[Any] = Compose(
[
Resize(__UpperCamelCase ),
CenterCrop(__UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__UpperCamelCase: List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(__UpperCamelCase: Dict ):
SCREAMING_SNAKE_CASE : List[str] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Tuple = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__UpperCamelCase )
# Initalize our trainer
SCREAMING_SNAKE_CASE : List[Any] = Trainer(
model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=dataset['train'] if training_args.do_train else None ,eval_dataset=dataset['validation'] if training_args.do_eval else None ,compute_metrics=__UpperCamelCase ,tokenizer=__UpperCamelCase ,data_collator=__UpperCamelCase ,)
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : Any = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = last_checkpoint
SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' ,train_result.metrics )
trainer.save_metrics('train' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate()
trainer.log_metrics('eval' ,__UpperCamelCase )
trainer.save_metrics('eval' ,__UpperCamelCase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : List[str] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def A ( snake_case :str , snake_case :Optional[Any] , snake_case :Union[str, Any] ) -> List[Any]:
__UpperCamelCase = 1.5
__UpperCamelCase = int(factor * num_class_images )
__UpperCamelCase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__lowerCAmelCase , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=__lowerCAmelCase )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
__UpperCamelCase = client.query(text=__lowerCAmelCase )
if len(__lowerCAmelCase ) >= factor * num_class_images or num_images > 1e4:
break
else:
__UpperCamelCase = int(factor * num_images )
__UpperCamelCase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__lowerCAmelCase , aesthetic_weight=0.1 , )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = tqdm(desc='downloading real regularization images' , total=__lowerCAmelCase )
with open(f'{class_data_dir}/caption.txt' , 'w' ) as fa, open(f'{class_data_dir}/urls.txt' , 'w' ) as fa, open(
f'{class_data_dir}/images.txt' , 'w' ) as fa:
while total < num_class_images:
__UpperCamelCase = class_images[count]
count += 1
try:
__UpperCamelCase = requests.get(images['url'] )
if img.status_code == 2_0_0:
__UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f'{class_data_dir}/images/{total}.jpg' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def A ( ) -> List[str]:
__UpperCamelCase = argparse.ArgumentParser('' , add_help=__lowerCAmelCase )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument('--class_data_dir' , help='path to save images' , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument('--num_class_images' , help='number of images to download' , default=2_0_0 , type=__lowerCAmelCase )
return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase : int = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCamelCase : List[Any] = False
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = StableDiffusionAttendAndExcitePipeline
lowercase = False
lowercase = TEXT_TO_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
__UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__UpperCamelCase = CLIPTextModel(__UpperCAmelCase )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = __UpperCamelCase = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__UpperCamelCase = pipe(**__UpperCAmelCase ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__UpperCamelCase = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
__UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = torch.manual_seed(51 )
__UpperCamelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to('cuda' )
__UpperCamelCase = 'a painting of an elephant with glasses'
__UpperCamelCase = [5, 7]
__UpperCamelCase = pipe(
prompt=__UpperCAmelCase , token_indices=__UpperCAmelCase , guidance_scale=7.5 , generator=__UpperCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
__UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[Any]=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :List[str]=4 , lowerCAmelCase__ :int=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :Tuple=0.02 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> int:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Any = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : int = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : Union[str, Any] = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : List[str] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :int=False ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Tuple = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : Dict = inputs_dict['''labels''']
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = OpenAIGPTModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __magic_name__( self :Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> List[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Dict = OpenAIGPTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCAmelCase__ ) # the president is
__SCREAMING_SNAKE_CASE : Dict = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase__ )
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the largest sum of any contiguous subarray of ``arr``."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    """Parse a pytest summary line into (n_failed, n_passed, time_spent)."""
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first error line reported for it."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
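# Illustrative example (editor addition; assumes a pytest-style summary string):
#   handle_test_results("= 2 failed, 130 passed in 67.32s =")  # -> (2, 130, "67.32s")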
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_a = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_a = f'*Num failures* :{len(job_result["failed"] )} \n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # key each report by its file name without the extension (e.g. "stats")
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346
| 0
|
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
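# Typical invocation (the script name and all paths below are placeholders):
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/tf_checkpoint \
#         --config_file /path/to/t5_config.json \
#         --pytorch_dump_path /path/to/pytorch_model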
| 63
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = ["input_features", "is_longer"]
def __init__( self : str , A : int=64 , A : Dict=48000 , A : str=480 , A : List[Any]=10 , A : Optional[Any]=1024 , A : Tuple=0.0 , A : List[Any]=False , A : float = 0 , A : float = 14000 , A : int = None , A : str = "fusion" , A : str = "repeatpad" , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
_UpperCAmelCase : Optional[Any] = top_db
_UpperCAmelCase : Dict = truncation
_UpperCAmelCase : List[Any] = padding
_UpperCAmelCase : Optional[Any] = fft_window_size
_UpperCAmelCase : Dict = (fft_window_size >> 1) + 1
_UpperCAmelCase : Any = hop_length
_UpperCAmelCase : Tuple = max_length_s
_UpperCAmelCase : str = max_length_s * sampling_rate
_UpperCAmelCase : Any = sampling_rate
_UpperCAmelCase : Optional[int] = frequency_min
_UpperCAmelCase : str = frequency_max
_UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm=A , mel_scale="htk" , )
_UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm="slaney" , mel_scale="slaney" , )
def _A ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _A ( self : Optional[Any] , A : np.array , A : Optional[np.array] = None ):
_UpperCAmelCase : Dict = spectrogram(
A , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A , log_mel="dB" , )
return log_mel_spectrogram.T
def _A ( self : str , A : str , A : List[str] , A : List[Any] ):
_UpperCAmelCase : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Optional[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCAmelCase : Tuple = [0]
# randomly choose index for each part
_UpperCAmelCase : Dict = np.random.choice(ranges[0] )
_UpperCAmelCase : str = np.random.choice(ranges[1] )
_UpperCAmelCase : Tuple = np.random.choice(ranges[2] )
_UpperCAmelCase : str = mel[idx_front : idx_front + chunk_frames, :]
_UpperCAmelCase : str = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCAmelCase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
_UpperCAmelCase : Dict = torch.tensor(mel[None, None, :] )
_UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate(
A , size=[chunk_frames, 64] , mode="bilinear" , align_corners=A )
_UpperCAmelCase : List[str] = mel_shrink[0][0].numpy()
_UpperCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _A ( self : List[Any] , A : np.array , A : List[str] , A : Any , A : Optional[int] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCAmelCase : int = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCAmelCase : str = len(A ) - max_length
_UpperCAmelCase : str = np.random.randint(0 , overflow + 1 )
_UpperCAmelCase : int = waveform[idx : idx + max_length]
_UpperCAmelCase : Any = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCAmelCase : Tuple = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCAmelCase : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCAmelCase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCAmelCase : int = False
else:
_UpperCAmelCase : Tuple = self._random_mel_fusion(A , A , A )
_UpperCAmelCase : Any = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCAmelCase : str = int(max_length / len(A ) )
_UpperCAmelCase : Dict = np.stack(np.tile(A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCAmelCase : Dict = int(max_length / len(A ) )
_UpperCAmelCase : List[str] = np.stack(np.tile(A , A ) )
_UpperCAmelCase : Optional[Any] = np.pad(A , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
_UpperCAmelCase : str = self._np_extract_fbank_features(A , self.mel_filters )
_UpperCAmelCase : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCAmelCase : List[str] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : str = None , A : Optional[str] = None , A : Optional[int] = None , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , **A : List[str] , ):
_UpperCAmelCase : int = truncation if truncation is not None else self.truncation
_UpperCAmelCase : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Any = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
_UpperCAmelCase : List[str] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[str] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCAmelCase : Dict = [
self._get_input_mel(A , max_length if max_length else self.nb_max_samples , A , A )
for waveform in raw_speech
]
_UpperCAmelCase : int = []
_UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCAmelCase : Union[str, Any] = np.random.randint(0 , len(A ) )
_UpperCAmelCase : Optional[Any] = True
if isinstance(input_mel[0] , A ):
_UpperCAmelCase : List[str] = [np.asarray(A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCAmelCase : Tuple = [[longer] for longer in is_longer]
_UpperCAmelCase : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
_UpperCAmelCase : Tuple = BatchFeature(A )
if return_tensors is not None:
_UpperCAmelCase : List[Any] = input_features.convert_to_tensors(A )
return input_features
| 31
| 0
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first (greedy)
    for denomination in reversed(denominations):
        # Use the current denomination as many times as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer
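# Worked example with the default Indian denominations used below (greedy change-making
# is optimal for standard currency systems like this one, though not for arbitrary sets):
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]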
# Driver Code
if __name__ == "__main__":
lowerCAmelCase = []
lowerCAmelCase = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
lowerCAmelCase = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
lowerCAmelCase = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'Following is minimal change for {value}: ')
lowerCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace) -> "ConvertCommand":
    """Factory used by the `datasets-cli convert` sub-command to build a ConvertCommand from parsed args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , _UpperCamelCase=1_0_0_0 , ) -> Any:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : int = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Tuple = num_choices
UpperCAmelCase_ : List[Any] = scope
UpperCAmelCase_ : Union[str, Any] = range_bbox
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : Optional[int] = bbox[i, j, 3]
UpperCAmelCase_ : Tuple = bbox[i, j, 1]
UpperCAmelCase_ : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : List[str] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Tuple = t
UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase )
UpperCAmelCase_ : str = None
if self.use_input_mask:
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Tuple = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = TFLayoutLMModel(config=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , _UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , _UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = TFLayoutLMForMaskedLM(config=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Union[str, Any] = TFLayoutLMForSequenceClassification(config=_UpperCamelCase )
UpperCAmelCase_ : Dict = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : int = TFLayoutLMForTokenClassification(config=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[str] = TFLayoutLMForQuestionAnswering(config=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Tuple = config_and_inputs
UpperCAmelCase_ : List[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : Dict = True
_snake_case : Dict = 1_0
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = TFLayoutLMModelTester(self )
UpperCAmelCase_ : Any = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> List[str]:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = TFLayoutLMModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
UpperCAmelCase_ : Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
UpperCAmelCase_ : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
UpperCAmelCase_ : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
UpperCAmelCase_ : int = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : List[Any] = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : Union[str, Any] = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the sequence output on [0, :3, :3]
UpperCAmelCase_ : Any = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
UpperCAmelCase_ : Tuple = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _UpperCamelCase , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self ) -> str:
# initialize model with randomly initialized sequence classification head
UpperCAmelCase_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : str = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
UpperCAmelCase_ : List[str] = outputs.loss
UpperCAmelCase_ : Tuple = (2,)
self.assertEqual(loss.shape , _UpperCamelCase )
# test the shape of the logits
UpperCAmelCase_ : Dict = outputs.logits
UpperCAmelCase_ : List[str] = (2, 2)
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
# initialize model with randomly initialized token classification head
UpperCAmelCase_ : int = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : str = model(
input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
# test the shape of the logits
UpperCAmelCase_ : int = outputs.logits
UpperCAmelCase_ : str = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> List[str]:
# initialize model with randomly initialized token classification head
UpperCAmelCase_ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : Tuple = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
# test the shape of the logits
UpperCAmelCase_ : Tuple = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _UpperCamelCase )
self.assertEqual(outputs.end_logits.shape , _UpperCamelCase )
| 29
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __snake_case :
def __init__( self , lowercase , ) -> Optional[Any]:
'''simple docstring'''
a__: str = parent
a__: Optional[int] = 13
a__: List[str] = 7
a__: str = True
a__: Optional[int] = True
a__: str = False
a__: List[Any] = True
a__: List[Any] = 99
a__: str = 32
a__: List[str] = 2
a__: List[str] = 4
a__: Union[str, Any] = 37
a__: Optional[int] = 'gelu'
a__: Dict = 0.1
a__: Optional[Any] = 0.1
a__: List[str] = 5_12
a__: List[str] = 16
a__: int = 2
a__: Tuple = 0.02
a__: Optional[Any] = 3
a__: Any = 4
a__: Optional[int] = None
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Tuple = None
if self.use_input_mask:
a__: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
a__: str = None
a__: str = None
a__: List[Any] = None
if self.use_labels:
a__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__: int = ids_tensor([self.batch_size] , self.num_choices)
a__: str = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: List[Any] = TFDistilBertModel(config=lowercase)
a__: Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
a__: Tuple = model(lowercase)
a__: Union[str, Any] = [input_ids, input_mask]
a__: str = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
a__: int = TFDistilBertForMaskedLM(config=lowercase)
a__: Any = {'input_ids': input_ids, 'attention_mask': input_mask}
a__: Union[str, Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = TFDistilBertForQuestionAnswering(config=lowercase)
a__: List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = self.num_labels
a__: Any = TFDistilBertForSequenceClassification(lowercase)
a__: Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
a__: List[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = self.num_choices
a__: List[Any] = TFDistilBertForMultipleChoice(lowercase)
a__: List[str] = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: int = tf.tile(tf.expand_dims(lowercase , 1) , (1, self.num_choices, 1))
a__: Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
a__: Optional[Any] = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = self.num_labels
a__: Optional[Any] = TFDistilBertForTokenClassification(lowercase)
a__: Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
a__: Tuple = model(lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Dict = self.prepare_config_and_inputs()
((a__) , (a__) , (a__) , (a__) , (a__) , (a__)): List[Any] = config_and_inputs
a__: Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a__ = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = TFDistilBertModelTester(self)
a__: Optional[Any] = ConfigTester(self , config_class=lowercase , dim=37)
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
a__: str = TFDistilBertModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: List[Any] = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
a__: Any = tf.constant([[0, 1, 2, 3, 4, 5]])
a__: str = model(lowercase)[0]
a__: Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase)
a__: Tuple = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1e-4)
| 203
|
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the number starts with one of the accepted issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
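# Worked sketch for the driver input "4111111111111111": doubling every second digit
# from the right turns the leading 4 into 8 and seven of the 1s into 2s, so the total is
# 8 + 7 × 2 + 8 × 1 = 30, which is divisible by 10, and the Luhn check passes.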
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 203
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """megatron-bert"""
def __init__( self :List[str] , lowerCamelCase :Any=2_9056 , lowerCamelCase :Optional[Any]=1024 , lowerCamelCase :List[str]=24 , lowerCamelCase :List[Any]=16 , lowerCamelCase :Any=4096 , lowerCamelCase :List[str]="gelu" , lowerCamelCase :Any=0.1 , lowerCamelCase :Any=0.1 , lowerCamelCase :Union[str, Any]=512 , lowerCamelCase :Dict=2 , lowerCamelCase :Any=0.02 , lowerCamelCase :Tuple=1e-12 , lowerCamelCase :Tuple=0 , lowerCamelCase :int="absolute" , lowerCamelCase :List[str]=True , **lowerCamelCase :Tuple , ) -> Optional[int]:
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = use_cache
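# Minimal usage sketch (upstream transformers exposes this class as MegatronBertConfig;
# the values shown are just the defaults above, not tied to any released checkpoint):
#     config = MegatronBertConfig(vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
#     # the config can then be passed to the matching MegatronBERT model classes.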
| 169
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
def __init__( self :int , lowerCamelCase :UNetaDModel , lowerCamelCase :ScoreSdeVeScheduler ) -> Any:
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self :Optional[Any] , lowerCamelCase :int = 1 , lowerCamelCase :int = 2000 , lowerCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase :Optional[str] = "pil" , lowerCamelCase :bool = True , **lowerCamelCase :Any , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase__ = self.unet.config.sample_size
UpperCAmelCase__ = (batch_size, 3, img_size, img_size)
UpperCAmelCase__ = self.unet
UpperCAmelCase__ = randn_tensor(lowerCamelCase , generator=lowerCamelCase ) * self.scheduler.init_noise_sigma
UpperCAmelCase__ = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase )
self.scheduler.set_sigmas(lowerCamelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase__ = self.unet(lowerCamelCase , lowerCamelCase ).sample
UpperCAmelCase__ = self.scheduler.step_correct(lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
# prediction step
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase ).sample
UpperCAmelCase__ = self.scheduler.step_pred(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = output.prev_sample, output.prev_sample_mean
UpperCAmelCase__ = sample_mean.clamp(0 , 1 )
UpperCAmelCase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase )
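# Usage sketch (the checkpoint id is an assumption; any repo pairing a UNet2DModel with a
# ScoreSdeVeScheduler should work — upstream diffusers names this class ScoreSdeVePipeline):
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(num_inference_steps=2000).images[0]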
| 169
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        """Prepare images and/or text for the model using the image processor and the T5 tokenizer."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Text-only input for non-VQA checkpoints: defer entirely to the tokenizer.
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel values (flattened patches)
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # for VQA checkpoints the question is rendered into the image as a header
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            # rename the tokenizer outputs so they feed the text decoder
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
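
# --- Usage sketch (illustrative, not part of the original file) ---
# Hypothetical use of the processor above; the checkpoint id and image path are
# placeholders, and the exact output keys depend on whether the checkpoint is a
# VQA variant.
#
# from PIL import Image
# from transformers import Pix2StructProcessor
#
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# image = Image.open("chart.png").convert("RGB")
# inputs = processor(images=image, text="A chart of monthly revenue", return_tensors="pt")
# print(inputs.keys())  # typically flattened_patches, attention_mask and decoder_* tensors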
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected, weighted graph keyed by (smaller vertex, larger vertex)."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Build a minimum spanning tree of the graph with Prim's algorithm."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: weight saved by replacing the network with its minimum spanning tree."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = DiTPipeline
lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase = False
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=0 ) -> List[Any]:
'''simple docstring'''
if str(__UpperCamelCase ).startswith('mps' ):
lowercase_ : Optional[Any] = torch.manual_seed(__UpperCamelCase )
else:
lowercase_ : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
lowercase_ : Dict = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = 'cpu'
lowercase_ : List[Any] = self.get_dummy_components()
lowercase_ : List[str] = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Dict = self.get_dummy_inputs(__UpperCamelCase )
lowercase_ : Union[str, Any] = pipe(**__UpperCamelCase ).images
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
lowercase_ : Dict = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
lowercase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase ,1e-3 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__UpperCamelCase ,expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Union[str, Any] = torch.manual_seed(0 )
lowercase_ : Dict = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
lowercase_ : str = ['vase', 'umbrella', 'white shark', 'white wolf']
lowercase_ : List[str] = pipe.get_label_ids(__UpperCamelCase )
lowercase_ : List[str] = pipe(__UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=40 ,output_type='np' ).images
for word, image in zip(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
lowercase_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
lowercase_ : Dict = ['vase', 'umbrella']
lowercase_ : Optional[Any] = pipe.get_label_ids(__UpperCamelCase )
lowercase_ : List[Any] = torch.manual_seed(0 )
lowercase_ : int = pipe(__UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=25 ,output_type='np' ).images
for word, image in zip(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'generated'
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = {}
if truncation is not None:
lowercase_ : int = truncation
lowercase_ : Dict = generate_kwargs
lowercase_ : List[Any] = {}
if return_tensors is not None and return_type is None:
lowercase_ : Union[str, Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ : str = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ : Union[str, Any] = self.tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowercase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,__UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
lowercase_ : str = ([prefix + arg for arg in args[0]],)
lowercase_ : Union[str, Any] = True
elif isinstance(args[0] ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = (prefix + args[0],)
lowercase_ : Union[str, Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowercase_ : List[Any] = self.tokenizer(*__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
if (
isinstance(args[0] ,__UpperCamelCase )
and all(isinstance(__UpperCamelCase ,__UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Any = self._parse_and_tokenize(__UpperCamelCase ,truncation=__UpperCamelCase ,**__UpperCamelCase )
return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.framework == "pt":
lowercase_ , lowercase_ : Optional[int] = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowercase_ , lowercase_ : Union[str, Any] = tf.shape(model_inputs['input_ids'] ).numpy()
lowercase_ : str = generate_kwargs.get('min_length' ,self.model.config.min_length )
lowercase_ : List[Any] = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(__UpperCamelCase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
lowercase_ : Tuple = self.model.generate(**__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : str = output_ids.shape[0]
if self.framework == "pt":
lowercase_ : List[Any] = output_ids.reshape(__UpperCamelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
lowercase_ : List[Any] = tf.reshape(__UpperCamelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=ReturnType.TEXT ,__UpperCamelCase=False ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ : List[Any] = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ : str = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,)
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'summary'
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'translation'
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
if getattr(self.tokenizer ,'_build_translation_inputs' ,__UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase ,return_tensors=self.framework ,truncation=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase ,truncation=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ : int = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
lowercase_ : str = src_lang
if tgt_lang is not None:
lowercase_ : Optional[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ : Tuple = kwargs.get('task' ,self.task )
lowercase_ : List[str] = task.split('_' )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
lowercase_ : Union[str, Any] = items[1]
lowercase_ : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
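
# --- Usage sketch (illustrative, not part of the original file) ---
# The task classes above correspond to the text2text-generation, summarization and
# translation pipelines in `transformers`. A minimal sketch through the public
# factory function (weights may be downloaded; the model ids are common public
# checkpoints):
#
# from transformers import pipeline
#
# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
# print(summarizer("A long article ...", max_length=60, min_length=10))
#
# translator = pipeline("translation_en_to_fr", model="t5-small")
# print(translator("How old are you?"))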
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = '▁'
lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCamelCase = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
lowerCamelCase = {
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
lowerCamelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowercase__ ( _lowerCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = []
UpperCamelCase = []
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]="<s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : Optional[int]="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : List[str]="<mask>" , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : int=False , **_UpperCAmelCase : List[Any] , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = legacy_behaviour
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
UpperCAmelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ = 1
UpperCAmelCase_ = len(self.sp_model )
UpperCAmelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
UpperCAmelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_ = src_lang if src_lang is not None else "eng_Latn"
UpperCAmelCase_ = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase__ ( self : Any , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> Dict:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = [1] * len(self.prefix_tokens )
UpperCAmelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[str]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = self.convert_tokens_to_ids(_UpperCAmelCase )
UpperCAmelCase_ = tgt_lang_id
return inputs
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : int , _UpperCAmelCase : str ) -> Any:
'''simple docstring'''
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : int , _UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase , " " ).strip()
return out_string
def lowercase__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Union[str, Any]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "eng_Latn" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "fra_Latn" , **_UpperCAmelCase : Any , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
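
# --- Usage sketch (illustrative, not part of the original file) ---
# The class above is the slow (SentencePiece) NLLB tokenizer. A hypothetical round
# trip with the released checkpoint; with the default legacy_behaviour=False the
# source language code is prepended and </s> appended to the input ids.
#
# from transformers import NllbTokenizer
#
# tok = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tok("Hello, how are you?", return_tensors="pt")
# print(tok.convert_ids_to_tokens(batch["input_ids"][0]))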
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Paths are intentionally left empty; point them at your own dataset before running.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO-format annotations from the label directory."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its normalised box centres along the chosen axis."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import string
def decrypt(message: str) -> None:
    """Print the plaintext candidate for every possible Caesar shift of ``message``."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
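
# --- Example (illustrative, not part of the original file) ---
# Brute-forcing the ciphertext "WKLV LV D VHFUHW" (a Caesar shift of 3) prints one
# candidate per key; the line for key #3 reads "THIS IS A SECRET".
#
# decrypt("WKLV LV D VHFUHW")
# # ...
# # Decryption using Key #3: THIS IS A SECRET
# # ...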
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
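
# --- Standalone sketch (illustrative, not part of the original file) ---
# The context manager defined above temporarily exports upper-cased environment
# variables and removes them again afterwards. The same pattern in isolation
# (names here are illustrative, not the library's):
#
# import os
# from contextlib import contextmanager
#
# @contextmanager
# def temp_env(**kwargs):
#     for key, value in kwargs.items():
#         os.environ[key.upper()] = str(value)
#     try:
#         yield
#     finally:
#         for key in kwargs:
#             os.environ.pop(key.upper(), None)
#
# with temp_env(master_port=29501):
#     assert os.environ["MASTER_PORT"] == "29501"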
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ (a_ ):
UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ = '''LayoutLMv2ImageProcessor'''
UpperCAmelCase__ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _A , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_A , _A )
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=_A , return_tensors=_A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_A , _A ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features['''words''']
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_token_type_ids=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
# add pixel values
UpperCAmelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(_A , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase = images
return encoded_inputs
def _lowercase ( self , _A , _A ):
'''simple docstring'''
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_A ) != len(_A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(_A )} and {len(_A )}""" )
return images_with_overflow
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
def _lowercase ( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowercase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , )
return self.image_processor_class
@property
def _lowercase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _A , )
return self.image_processor
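# Usage sketch, not part of the class above: a hedged example of driving a processor like
# this one when OCR runs outside it (apply_ocr=False), so words and boxes are passed in
# explicitly. Class names, the checkpoint id and the toy words/boxes are illustrative
# assumptions, not taken from this file.
def _processor_usage_demo():
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizerFast

    image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
    processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)

    image = Image.new("RGB", (224, 224), "white")        # placeholder document image
    words = ["hello", "world"]                           # OCR words supplied by the caller
    boxes = [[48, 84, 156, 108], [160, 84, 254, 108]]    # one 0-1000 normalized box per word
    encoding = processor(image, text=words, boxes=boxes, return_tensors="pt")
    return sorted(encoding.keys())                       # includes input_ids, bbox, attention_mask, image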
| 273
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Dict = logging.get_logger(__name__)
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Tuple = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__A : List[Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = SqueezeBertTokenizer
def __init__( self , _A=None , _A=None , _A=True , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(_A , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**_A )
UpperCAmelCase = do_lower_case
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
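# Illustration (not part of the class above) of the [CLS]/[SEP] layout and 0/1 segment ids
# the two special-token helpers above are meant to produce for a sentence pair; the token
# ids used here are placeholders, not real vocabulary ids.
def _pair_layout_demo() -> tuple:
    cls_id, sep_id = 101, 102
    seq_a, seq_b = [7, 8, 9], [11, 12]
    input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]        # [CLS] A [SEP] B [SEP]
    token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
    assert len(input_ids) == len(token_type_ids)
    return input_ids, token_type_ids    # ([101, 7, 8, 9, 102, 11, 12, 102], [0, 0, 0, 0, 0, 1, 1, 1])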
| 273
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
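# Sketch (not part of this file): with the _LazyModule wiring above, importing the package
# stays cheap and the torch-backed classes are only materialized when first accessed.
# Assumes an installed `transformers` with torch available.
def _lazy_import_demo() -> str:
    from transformers import BigBirdPegasusConfig   # lightweight: only the config module loads here

    return BigBirdPegasusConfig().model_type         # "bigbird_pegasus"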
| 343
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    _A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    _A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
    _A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
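# Self-contained sketch (not part of the script above) of the grouping step it performs:
# each log entry is (error line, error message, failed test); messages are counted and
# sorted so the most frequent failure modes float to the top of the markdown report.
# The file and test names below are synthetic placeholders.
def _demo_error_table() -> str:
    logs = [
        ("test_a.py:10", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_forward"),
        ("test_b.py:22", "OSError: checkpoint not found", "tests/models/t5/test_modeling_t5.py::test_load"),
        ("test_c.py:7", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_backward"),
    ]
    lines = ["| no. | error | status |", "|-:|:-|:-|"]
    for error, count in Counter(entry[1] for entry in logs).most_common():
        lines.append(f"| {count} | {error[:100]} | |")   # first data row: | 2 | AssertionError | |
    return "\n".join(lines)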
| 343
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def a_ ( lowerCamelCase : float , lowerCamelCase : int ):
lowerCAmelCase = u
for i in range(1 , lowerCamelCase ):
lowerCAmelCase = temp * (u - i)
return temp
def a_ ( ):
    lowerCAmelCase = int(input('enter the number of values: ' ) )
lowerCAmelCase = []
for _ in range(lowerCamelCase ):
y.append([] )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
y[i].append(lowerCamelCase )
lowerCAmelCase = 0
print('enter the values of parameters in a list: ' )
lowerCAmelCase = list(map(lowerCamelCase , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(lowerCamelCase ):
lowerCAmelCase = float(input() )
lowerCAmelCase = int(input('enter the value to interpolate: ' ) )
lowerCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCamelCase ):
for j in range(n - i ):
lowerCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
lowerCAmelCase = y[0][0]
for i in range(1 , lowerCamelCase ):
summ += (ucal(lowerCamelCase , lowerCamelCase ) * y[0][i]) / math.factorial(lowerCamelCase )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
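# Hedged, self-contained sketch (not the interactive code above) of the same Newton
# forward-difference interpolation, with hard-coded sample data instead of input().
def _forward_difference_demo() -> float:
    xs = [0.0, 1.0, 2.0, 3.0]                 # equally spaced abscissae
    ys = [1.0, 2.0, 4.0, 8.0]                 # f(x) = 2 ** x at those points
    n = len(xs)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = ys[i]
    for j in range(1, n):                     # forward-difference table, column by column
        for i in range(n - j):
            table[i][j] = table[i + 1][j - 1] - table[i][j - 1]
    value = 1.5
    u = (value - xs[0]) / (xs[1] - xs[0])
    approx = table[0][0]
    for j in range(1, n):
        term = 1.0
        for k in range(j):                    # u * (u - 1) * ... * (u - j + 1)
            term *= u - k
        approx += term * table[0][j] / math.factorial(j)
    return approx                             # ~2.8125, close to 2 ** 1.5 ~ 2.828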
| 4
|
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 1000000 ):
'''simple docstring'''
UpperCAmelCase__ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
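# Hand-checked sketch (not the function above) of the same totient sieve on a tiny limit:
# phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21 reduced proper fractions with d <= 8.
def _totient_sum_demo(limit: int = 8) -> int:
    phi = [i - 1 for i in range(limit + 1)]      # phi[p] = p - 1 already holds for primes
    for i in range(2, limit + 1):
        if phi[i] == i - 1:                      # i is prime: adjust every multiple of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])               # 21 for limit = 8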
| 346
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Dict = emb.weight.shape
UpperCAmelCase_ : Optional[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = emb.weight.data
return lin_layer
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
UpperCAmelCase_ : Optional[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
UpperCAmelCase_ : List[str] = mam_aaa["model"]
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = state_dict["encoder.embed_tokens.weight"].shape[0]
UpperCAmelCase_ : List[Any] = MaMaaaConfig(
vocab_size=_SCREAMING_SNAKE_CASE , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
UpperCAmelCase_ : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
UpperCAmelCase_ : List[Any] = MaMaaaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowerCamelCase = parser.parse_args()
    _lowerCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
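# Small sketch (not part of the script above) of the weight-tying idea behind
# make_linear_from_emb: the LM head reuses the input embedding matrix, so
# logits = hidden_states @ embedding_weight.T without a separate projection.
def _tied_lm_head_demo() -> torch.Size:
    emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
    vocab_size, emb_size = emb.weight.shape
    lm_head = nn.Linear(emb_size, vocab_size, bias=False)
    lm_head.weight.data = emb.weight.data         # share the same (vocab, hidden) matrix
    hidden_states = torch.randn(2, 5, emb_size)
    return lm_head(hidden_states).shape           # torch.Size([2, 5, 100])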
| 67
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self ,_snake_case ,_snake_case=12 ,_snake_case=7 ,_snake_case=True ,_snake_case=True ,_snake_case=True ,_snake_case=99 ,_snake_case=32 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=0.02 ,_snake_case=0 ,_snake_case=None ,):
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : Any = projection_dim
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Any = dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = scope
UpperCAmelCase_ : List[str] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase_ : Any = input_mask.numpy()
UpperCAmelCase_ , UpperCAmelCase_ : str = input_mask.shape
UpperCAmelCase_ : str = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def UpperCamelCase__ ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = TFBlipTextModel(config=_snake_case )
UpperCAmelCase_ : Optional[int] = model(_snake_case ,attention_mask=_snake_case ,training=_snake_case )
UpperCAmelCase_ : Dict = model(_snake_case ,training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = config_and_inputs
UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Tuple =(TFBlipTextModel,) if is_tf_available() else ()
__A : List[Any] =False
__A : List[Any] =False
__A : Any =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = BlipTextModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def UpperCamelCase__ ( self ):
pass
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCamelCase__ ( self ,_snake_case=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 67
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
snake_case__ : Optional[int] = BlenderbotConfig
snake_case__ : Optional[int] = {}
snake_case__ : str = "gelu"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[Any]=9_9 , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : int=2_0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Dict=0 , ):
"""simple docstring"""
_lowerCamelCase : Any = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : int = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : Any = eos_token_id
_lowerCamelCase : Any = pad_token_id
_lowerCamelCase : List[Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCamelCase : List[str] = prepare_blenderbot_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = TFBlenderbotModel(config=__lowerCAmelCase ).get_decoder()
_lowerCamelCase : Union[str, Any] = inputs_dict['''input_ids''']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :]
_lowerCamelCase : str = inputs_dict['''head_mask''']
_lowerCamelCase : str = 1
# first forward pass
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : List[str] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
_lowerCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_lowerCamelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
_lowerCamelCase : List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-3 )
def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : List[Any], A_ : Optional[int]=None, A_ : List[str]=None, A_ : Dict=None, A_ : Any=None, A_ : Union[str, Any]=None, ):
'''simple docstring'''
if attention_mask is None:
_lowerCamelCase : Any = tf.cast(tf.math.not_equal(A_, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_lowerCamelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
snake_case__ : Optional[int] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
snake_case__ : Dict = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case__ : Union[str, Any] = True
snake_case__ : str = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : str = TFBlenderbotModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase):
snake_case__ : str = ["My friends are cool but they eat too many carbs."]
snake_case__ : int = "facebook/blenderbot-400M-distill"
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.tokenizer(self.src_text , return_tensors='''tf''' )
_lowerCamelCase : int = self.model.generate(
model_inputs.input_ids , )
_lowerCamelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCAmelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 72
|
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : str ):
'''simple docstring'''
return [ord(A_ ) - 96 for elem in plain]
def snake_case_ ( A_ : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''', A_ )
print('''Decoded:''', decode(A_ ) )
if __name__ == "__main__":
main()
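# Worked example (not the functions above) of the intended a=1 ... z=26 mapping:
# "abc" encodes to [1, 2, 3] and decodes back, since ord("a") == 97.
def _cipher_round_trip_demo() -> bool:
    plain = "abc"
    encoded = [ord(ch) - 96 for ch in plain]                  # [1, 2, 3]
    decoded = "".join(chr(code + 96) for code in encoded)     # "abc"
    return encoded == [1, 2, 3] and decoded == plain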
| 72
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__: List[str] = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Optional[Any] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__magic_name__: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : torch.FloatTensor
lowercase__ : Optional[torch.FloatTensor] = None
def UpperCamelCase ( _A, _A=0.999, _A="cosine", ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_A ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__magic_name__ : Optional[Any] = []
for i in range(_A ):
__magic_name__ : Dict = i / num_diffusion_timesteps
__magic_name__ : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ), _A ) )
return torch.tensor(_A, dtype=torch.floataa )
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self , lowerCAmelCase__ = 10_00 , lowerCAmelCase__ = "fixed_small_log" , lowerCAmelCase__ = True , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = "squaredcos_cap_v2" , ) -> Union[str, Any]:
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
__magic_name__ : Tuple = betas_for_alpha_bar(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = 1.0 - self.betas
__magic_name__ : str = torch.cumprod(self.alphas , dim=0 )
__magic_name__ : Any = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
__magic_name__ : Tuple = 1.0
        # settable values
__magic_name__ : List[Any] = None
__magic_name__ : int = torch.from_numpy(np.arange(0 , lowerCAmelCase__ )[::-1].copy() )
__magic_name__ : List[Any] = variance_type
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> torch.FloatTensor:
return sample
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> str:
__magic_name__ : List[Any] = num_inference_steps
__magic_name__ : Union[str, Any] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
__magic_name__ : List[Any] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
__magic_name__ : Dict = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Tuple:
if prev_timestep is None:
__magic_name__ : int = t - 1
__magic_name__ : Optional[Any] = self.alphas_cumprod[t]
__magic_name__ : Any = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__magic_name__ : Tuple = 1 - alpha_prod_t
__magic_name__ : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__magic_name__ : List[str] = self.betas[t]
else:
__magic_name__ : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__magic_name__ : Dict = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
__magic_name__ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__magic_name__ : str = torch.log(torch.clamp(lowerCAmelCase__ , min=1e-2_0 ) )
__magic_name__ : Optional[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__magic_name__ : List[str] = variance.log()
__magic_name__ : Optional[int] = beta.log()
__magic_name__ : Any = (predicted_variance + 1) / 2
__magic_name__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__ = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
__magic_name__ : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__magic_name__ ,__magic_name__ : List[Any] = torch.split(lowerCAmelCase__ , sample.shape[1] , dim=1 )
else:
__magic_name__ : List[str] = None
# 1. compute alphas, betas
if prev_timestep is None:
__magic_name__ : Union[str, Any] = t - 1
__magic_name__ : List[str] = self.alphas_cumprod[t]
__magic_name__ : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__magic_name__ : Any = 1 - alpha_prod_t
__magic_name__ : Dict = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__magic_name__ : Union[str, Any] = self.betas[t]
__magic_name__ : int = self.alphas[t]
else:
__magic_name__ : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
__magic_name__ : Tuple = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__magic_name__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__magic_name__ : Tuple = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__magic_name__ : Tuple = torch.clamp(
lowerCAmelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : List[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__magic_name__ : Dict = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__magic_name__ : Tuple = 0
if t > 0:
__magic_name__ : Any = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=lowerCAmelCase__ , device=model_output.device )
__magic_name__ : Tuple = self._get_variance(
lowerCAmelCase__ , predicted_variance=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , )
if self.variance_type == "fixed_small_log":
__magic_name__ : Tuple = variance
elif self.variance_type == "learned_range":
__magic_name__ : int = (0.5 * variance).exp()
else:
raise ValueError(
F'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
""" for the UnCLIPScheduler.""" )
__magic_name__ : Tuple = variance * variance_noise
__magic_name__ : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> torch.FloatTensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
__magic_name__ : List[str] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
__magic_name__ : Any = timesteps.to(original_samples.device )
__magic_name__ : int = alphas_cumprod[timesteps] ** 0.5
__magic_name__ : Union[str, Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
__magic_name__ : int = sqrt_alpha_prod.unsqueeze(-1 )
__magic_name__ : Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__magic_name__ : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
__magic_name__ : Any = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
__magic_name__ : Any = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
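# Numeric sketch (not part of the scheduler above) of the "squaredcos_cap_v2" schedule
# built by betas_for_alpha_bar: alpha_bar(t) follows a squared cosine and each beta_i is
# the relative drop of alpha_bar over one step, capped at max_beta, so betas grow with t.
def _cosine_betas_demo(num_steps: int = 10, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t_now, t_next = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t_next) / alpha_bar(t_now), max_beta))
    return betas          # small betas early in the schedule, ~max_beta at the final step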
| 138
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
"""simple docstring"""
A__ : Union[str, Any] = ["pixel_values"]
def __init__( self : Any , _snake_case : Any = True , _snake_case : Any = None , _snake_case : int = PILImageResampling.BILINEAR , _snake_case : List[Any] = True , _snake_case : List[Any] = None , _snake_case : Optional[Any] = True , _snake_case : Optional[Any] = 1 / 255 , _snake_case : List[Any] = True , _snake_case : Optional[Any] = None , _snake_case : int = None , **_snake_case : int , ):
super().__init__(**_snake_case )
__lowercase : Dict = size if size is not None else {'shortest_edge': 256}
__lowercase : Union[str, Any] = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowercase : Tuple = get_size_dict(_snake_case )
__lowercase : Union[str, Any] = do_resize
__lowercase : List[Any] = size
__lowercase : List[Any] = resample
__lowercase : Any = do_center_crop
__lowercase : Optional[Any] = crop_size
__lowercase : int = do_rescale
__lowercase : int = rescale_factor
__lowercase : Dict = do_normalize
__lowercase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self : Tuple , _snake_case : Tuple , _snake_case : Tuple , _snake_case : int = PILImageResampling.BICUBIC , _snake_case : Dict = None , **_snake_case : Optional[int] , ):
__lowercase : int = get_size_dict(_snake_case , default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__lowercase : Optional[int] = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case )
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Any , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : str = None , **_snake_case : List[Any] , ):
__lowercase : List[str] = get_size_dict(_snake_case )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Dict , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Optional[Any] = None , **_snake_case : Tuple ):
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : int , _snake_case : List[Any] , _snake_case : Any , _snake_case : Any , _snake_case : List[str] = None , **_snake_case : List[str] , ):
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def snake_case_ ( self : Any , _snake_case : Tuple , _snake_case : Union[str, Any] = None , _snake_case : Union[str, Any] = None , _snake_case : Any = None , _snake_case : Any = None , _snake_case : Optional[Any] = None , _snake_case : Optional[int] = None , _snake_case : Union[str, Any] = None , _snake_case : Any = None , _snake_case : int = None , _snake_case : str = None , _snake_case : str = None , _snake_case : Dict = ChannelDimension.FIRST , **_snake_case : Any , ):
__lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Tuple = size if size is not None else self.size
__lowercase : Tuple = get_size_dict(_snake_case , default_to_square=_snake_case )
__lowercase : Optional[int] = resample if resample is not None else self.resample
__lowercase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowercase : Union[str, Any] = get_size_dict(_snake_case )
__lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : List[Any] = image_mean if image_mean is not None else self.image_mean
__lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
__lowercase : Tuple = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase : int = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
__lowercase : List[Any] = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images]
if do_center_crop:
__lowercase : Any = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images]
if do_rescale:
__lowercase : Any = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images]
if do_normalize:
__lowercase : Any = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images]
__lowercase : str = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
__lowercase : int = {'pixel_values': images}
return BatchFeature(data=_snake_case , tensor_type=_snake_case )
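# Sketch (not part of the class above) of the rescale + normalize arithmetic applied after
# resizing and cropping; with the IMAGENET_STANDARD mean/std of 0.5 the output lands in [-1, 1].
def _rescale_normalize_demo() -> tuple:
    image = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)   # pretend it is already cropped
    pixels = image.astype(np.float32) * (1 / 255)                  # do_rescale
    pixels = (pixels - np.array(IMAGENET_STANDARD_MEAN)) / np.array(IMAGENET_STANDARD_STD)  # do_normalize
    pixels = pixels.transpose(2, 0, 1)                             # ChannelDimension.FIRST -> (C, H, W)
    return pixels.shape, float(pixels.min()), float(pixels.max())  # (3, 224, 224), within [-1, 1]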
| 156
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : Tuple = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77
| 0
|
"""simple docstring"""
from manim import *
class __A (__UpperCamelCase):
'''simple docstring'''
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("""CPU""" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("""GPU""" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("""Model""" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
rect.set_stroke(UpperCAmelCase_ )
snake_case_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase_ , buff=0.0 )
self.add(UpperCAmelCase_ )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase_ )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.7 )
target.move_to(UpperCAmelCase_ )
ckpt_arr.append(UpperCAmelCase_ )
snake_case_ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
snake_case_ = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
snake_case_ = Text("""Disk""" , font_size=24 )
snake_case_ = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , Write(UpperCAmelCase_ , run_time=1 ) , Create(UpperCAmelCase_ , run_time=1 ) )
snake_case_ = []
for i, rect in enumerate(UpperCAmelCase_ ):
snake_case_ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase_ , run_time=1.5 ) )
self.play(*UpperCAmelCase_ )
self.play(FadeOut(UpperCAmelCase_ ) )
snake_case_ = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ , *UpperCAmelCase_ ) , )
self.wait()
| 351
|
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , _SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
snake_case_ , snake_case_ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
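# Worked sketch (not the function above) of the exchange-sort idea: compare position i
# with every later position and swap whenever a smaller element is found there.
def _exchange_sort_demo(numbers: list) -> list:
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers         # _exchange_sort_demo([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]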
| 233
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # Tokenize the text and/or run the image processor, covering both the captioning and the VQA mode
        # (in VQA mode the question is rendered into the image header by the image processor).
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 330
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    # Raised when the lock could not be acquired within the timeout.

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    # Context-manager proxy returned by acquire(); releases the lock on exit.

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    # Implements the base class of a file lock.

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        # Platform-dependent low-level acquire; implemented by subclasses.
        raise NotImplementedError()

    def _release(self):
        # Platform-dependent low-level release; implemented by subclasses.
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise

        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        # Release the lock once the lock counter reaches zero (or when force is True).
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.release()
return None
    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        # Replace an over-long lock file name with a truncated name plus a hash suffix.
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    # Uses msvcrt.locking() to hard lock the lock file on Windows systems.

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    # Uses fcntl.flock() to hard lock the lock file on unix systems.

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    # Simply watches the existence of the lock file.

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Alias for the lock class that fits the current platform.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 330
| 1
|
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    # Number of positions at which the two equal-length strings differ.
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336
|
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    # Return the input string with the order of its words reversed.
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    # Collect (timm_name, hf_name) pairs mapping timm ViT weights to their HuggingFace ViT equivalents.
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    # Split each fused timm qkv projection into separate query/key/value weights and biases.
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    # Drop the timm classification-head weights, which have no counterpart in the base HF model.
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    # Move a value from its old key to its new key.
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # Standard COCO cats image used to sanity-check the converted model.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
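    # End-to-end conversion: build the ViT config from the timm model name, port the weights,
    # compare the outputs against the original timm model, then save model and image processor.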
lowerCamelCase_ = ViTConfig()
lowerCamelCase_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCamelCase_ = True
lowerCamelCase_ = int(vit_name[-12:-10] )
lowerCamelCase_ = int(vit_name[-9:-6] )
else:
lowerCamelCase_ = 1000
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "imagenet-1k-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = int(vit_name[-6:-4] )
lowerCamelCase_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
lowerCamelCase_ = 192
lowerCamelCase_ = 768
lowerCamelCase_ = 12
lowerCamelCase_ = 3
elif vit_name[9:].startswith("small" ):
lowerCamelCase_ = 384
lowerCamelCase_ = 1536
lowerCamelCase_ = 12
lowerCamelCase_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
lowerCamelCase_ = 768
lowerCamelCase_ = 2304
lowerCamelCase_ = 8
lowerCamelCase_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
lowerCamelCase_ = 1024
lowerCamelCase_ = 4096
lowerCamelCase_ = 24
lowerCamelCase_ = 16
elif vit_name[4:].startswith("huge" ):
lowerCamelCase_ = 1280
lowerCamelCase_ = 5120
lowerCamelCase_ = 32
lowerCamelCase_ = 16
# load original model from timm
lowerCamelCase_ = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCAmelCase_ )
lowerCamelCase_ = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase_ = ViTModel(UpperCAmelCase_ ).eval()
else:
lowerCamelCase_ = ViTForImageClassification(UpperCAmelCase_ ).eval()
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCamelCase_ = DeiTImageProcessor(size=config.image_size )
else:
lowerCamelCase_ = ViTImageProcessor(size=config.image_size )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = encoding["pixel_values"]
lowerCamelCase_ = model(UpperCAmelCase_ )
if base_model:
lowerCamelCase_ = timm_model.forward_features(UpperCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCAmelCase_ , outputs.pooler_output , atol=1E-3 )
else:
lowerCamelCase_ = timm_model(UpperCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase_ , outputs.logits , atol=1E-3 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 55
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
if "cls_token" in name:
lowerCamelCase__ : Any = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
lowerCamelCase__ : Dict = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Any = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : int = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
lowerCamelCase__ : Dict = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : List[str] = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : List[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCamelCase__ : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCamelCase__ : Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : Tuple = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : int = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Optional[Any] = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
lowerCamelCase__ : List[str] = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
lowerCamelCase__ : Optional[Any] = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Tuple = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
lowerCamelCase__ : Optional[int] = key.split('''.''' )
lowerCamelCase__ : Any = int(key_split[1] )
if "decoder_blocks" in key:
lowerCamelCase__ : Optional[int] = config.decoder_hidden_size
lowerCamelCase__ : str = '''decoder.decoder_layers.'''
if "weight" in key:
lowerCamelCase__ : int = val[:dim, :]
lowerCamelCase__ : str = val[dim : dim * 2, :]
lowerCamelCase__ : Tuple = val[-dim:, :]
elif "bias" in key:
lowerCamelCase__ : Tuple = val[:dim]
lowerCamelCase__ : Any = val[dim : dim * 2]
lowerCamelCase__ : Optional[Any] = val[-dim:]
else:
lowerCamelCase__ : str = config.hidden_size
lowerCamelCase__ : Dict = '''vit.encoder.layer.'''
if "weight" in key:
lowerCamelCase__ : int = val[:dim, :]
lowerCamelCase__ : Any = val[dim : dim * 2, :]
lowerCamelCase__ : Any = val[-dim:, :]
elif "bias" in key:
lowerCamelCase__ : Any = val[:dim]
lowerCamelCase__ : List[str] = val[dim : dim * 2]
lowerCamelCase__ : List[str] = val[-dim:]
else:
lowerCamelCase__ : Dict = val
return orig_state_dict
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCamelCase__ : Dict = 1024
lowerCamelCase__ : int = 4096
lowerCamelCase__ : Optional[Any] = 24
lowerCamelCase__ : Dict = 16
elif "huge" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = 14
lowerCamelCase__ : Optional[int] = 1280
lowerCamelCase__ : Any = 5120
lowerCamelCase__ : Optional[Any] = 32
lowerCamelCase__ : Any = 16
lowerCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase )
lowerCamelCase__ : int = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='''cpu''' )['''model''']
lowerCamelCase__ : List[str] = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase__ : Optional[int] = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
lowerCamelCase__ : int = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
lowerCamelCase__ : Any = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
lowerCamelCase__ : int = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
lowerCamelCase__ : str = model(**UpperCAmelCase )
lowerCamelCase__ : Tuple = outputs.logits
if "large" in checkpoint_url:
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
lowerCamelCase__ : List[str] = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_A : Dict = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 265
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[int] , A : Union[str, Any] , A : Any=7 , A : Optional[int]=3 , A : Tuple=3_0 , A : List[Any]=4_0_0 , A : str=True , A : Optional[int]=None , A : Tuple=True , A : Union[str, Any]=1 / 2_5_5 , A : Any=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Optional[int]=[0.5, 0.5, 0.5] , A : str=True , ) ->List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : List[str] = size
lowerCamelCase__ : Union[str, Any] = do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : str = image_std
lowerCamelCase__ : List[Any] = do_pad
def __lowerCamelCase ( self : List[str] ) ->Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self : Tuple , A : int , A : List[str]=False ) ->int:
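        # Compute the (height, width) the image processor is expected to produce for these inputs.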
if not batched:
lowerCamelCase__ : Union[str, Any] = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : List[Any] = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Any = self.size['''shortest_edge''']
else:
lowerCamelCase__ : Optional[Any] = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : Dict = max(A , key=lambda A : item[0] )[0]
lowerCamelCase__ : Dict = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
lowerCamelCase__ : Optional[int] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self : Dict ) ->Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
self.assertTrue(hasattr(A , '''rescale_factor''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
lowerCamelCase__ : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , A )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
pass
def __lowerCamelCase ( self : str ) ->Optional[Any]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Any ) ->Any:
# Initialize image_processing
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self : Tuple ) ->List[Any]:
# prepare image and target
lowerCamelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Union[str, Any] = json.loads(f.read() )
lowerCamelCase__ : List[str] = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase__ : Optional[int] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
lowerCamelCase__ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase__ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def __lowerCamelCase ( self : Optional[Any] ) ->List[str]:
# prepare image, target and masks_path
lowerCamelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Optional[Any] = json.loads(f.read() )
lowerCamelCase__ : Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase__ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase__ : Any = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
lowerCamelCase__ : Tuple = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : int = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 265
| 1
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def A ( snake_case :Optional[Any]=None , snake_case :Union[str, Any]=None ) -> Dict:
return field(default_factory=lambda: default , metadata=snake_case )
@dataclass
class __lowerCAmelCase :
lowercase = field(
metadata={"help": "The csv file to plot."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Disable logarithmic scale when plotting"} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
lowercase = list_field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def A ( snake_case :Tuple ) -> List[Any]:
try:
int(snake_case )
return True
except ValueError:
return False
def A ( snake_case :str ) -> List[Any]:
try:
float(snake_case )
return True
except ValueError:
return False
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
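        # Load the benchmark csv into a per-model mapping of batch sizes, sequence lengths and results.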
__UpperCamelCase = args
__UpperCamelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__UpperCamelCase = csv.DictReader(__UpperCAmelCase )
for row in reader:
__UpperCamelCase = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__UpperCamelCase = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__UpperCamelCase = float(row['result'] )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = plt.subplots()
__UpperCamelCase = 'Time usage' if self.args.is_time else 'Memory usage'
__UpperCamelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__UpperCamelCase = sorted(set(self.result_dict[model_name]['bsz'] ) )
__UpperCamelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) )
__UpperCamelCase = self.result_dict[model_name]['result']
((__UpperCamelCase) , (__UpperCamelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__UpperCamelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__UpperCamelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__UpperCAmelCase , )
else:
__UpperCamelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__UpperCamelCase) , (__UpperCamelCase)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__UpperCamelCase = np.asarray(__UpperCAmelCase , __UpperCAmelCase )[: len(__UpperCAmelCase )]
plt.scatter(
__UpperCAmelCase , __UpperCAmelCase , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(__UpperCAmelCase , __UpperCAmelCase , '--' )
title_str += F' {label_model_name} vs.'
__UpperCamelCase = title_str[:-4]
__UpperCamelCase = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(__UpperCAmelCase )
plt.xlabel(__UpperCAmelCase )
plt.ylabel(__UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def A ( ) -> Tuple:
__UpperCamelCase = HfArgumentParser(snake_case )
__UpperCamelCase = parser.parse_args_into_dataclasses()[0]
__UpperCamelCase = Plot(args=snake_case )
plot.plot()
if __name__ == "__main__":
main()
| 316
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize to a dict, converting any GenerationConfig values into plain dicts.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 316
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Recursively insertion-sorts the first n elements of the collection in place.
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Moves collection[index] forward until the adjacent pair is in ascending order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 360
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
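        # setUp: assemble a small BLIP-2 processor (Blip image processor + tiny GPT-2 tokenizer) and save it to a temp dir.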
UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase_ : str = BlipImageProcessor()
UpperCAmelCase_ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase_ : Optional[Any] = BlipaProcessor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).tokenizer
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).image_processor
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : str = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : int = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> int:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''lower newer'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Any = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 235
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        # Walk the list, remembering visited nodes so a cycle can be detected.
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Node(1)
_SCREAMING_SNAKE_CASE = Node(2)
_SCREAMING_SNAKE_CASE = Node(3)
_SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
_SCREAMING_SNAKE_CASE = Node(5)
_SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
_SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
| 343
| 1
|
def solution(n: int = 1_000) -> int:
    # Count how many of the first n expansions of sqrt(2) have a numerator with more digits than the denominator.
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 361
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
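    # The failure (prefix) array stores, for each position, the length of the longest proper prefix of
    # the pattern that is also a suffix ending there; kmp() uses it to resume matching without
    # rescanning the text, which is what gives the O(len(text) + len(pattern)) running time.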
| 47
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Tuple =["pixel_values"]
def __init__( self : Any , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : Dict , ):
"""simple docstring"""
super().__init__(**a )
__lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_56}
__lowerCamelCase = get_size_dict(a , default_to_square=a )
__lowerCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowerCamelCase = get_size_dict(a , param_name='''crop_size''' )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , ):
"""simple docstring"""
__lowerCamelCase = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(a , size=size['''shortest_edge'''] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict , ):
"""simple docstring"""
__lowerCamelCase = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(a , size=(size['''height'''], size['''width''']) , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] ):
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] , ):
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : List[Any] , ):
"""simple docstring"""
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(a , default_to_square=a )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(a , param_name='''crop_size''' )
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(a ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
__lowerCamelCase = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=a , mean=a , std=a ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(a , a ) for image in images]
__lowerCamelCase = {'''pixel_values''': images}
return BatchFeature(data=a , tensor_type=a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Tuple , a : List[Tuple] = None ):
"""simple docstring"""
__lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(a ):
__lowerCamelCase = target_sizes.numpy()
__lowerCamelCase = []
for idx in range(len(a ) ):
__lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=a )
__lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
__lowerCamelCase = logits.argmax(dim=1 )
__lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
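        # Each returned entry is a 2D tensor of per-pixel class indices; when target_sizes is provided,
        # the logits are first resized (bilinear) to the requested (height, width) before the argmax.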
| 67
|
'''simple docstring'''
def add(first: int, second: int) -> int:
    while second != 0:
        # XOR adds the bits without carrying; AND picks out the carry bits,
        # which are shifted left and fed back in until no carry remains.
        carry = first & second
        first ^= second
        second = carry << 1
    return first
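# For example, add(3, 5) == 8 and add(13, 5) == 18. The loop assumes non-negative operands: with
# Python's unbounded integers a negative carry never shifts out, so the loop can fail to terminate.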
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
| 67
| 1
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Count how many ways each total can be rolled with dice_number dice of sides_number sides."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice), rounded to 7 places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"{solution() = }")
| 356
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
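# Illustrative example (values chosen here, not from the original file):
#     A = np.array([[2.0, -2.0], [-2.0, 5.0]])
#     L, U = lower_upper_decomposition(A)
#     # L == [[1, 0], [-1, 1]], U == [[2, -2], [0, 3]], and L @ U reproduces A.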
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a = logging.get_logger(__name__)
a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
a = {'''mobilebert-uncased''': 512}
a = {}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : str = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Dict = MobileBertTokenizer
def __init__( self : Tuple , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]="[UNK]" , _UpperCAmelCase : Union[str, Any]="[SEP]" , _UpperCAmelCase : Any="[PAD]" , _UpperCAmelCase : Optional[Any]="[CLS]" , _UpperCAmelCase : List[Any]="[MASK]" , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : List[Any] , ):
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
_A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
_A = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
_A = do_lower_case
_A = strip_accents
_A = tokenize_chinese_chars
_A = normalizer_class(**_UpperCAmelCase )
_A = do_lower_case
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Any=None ):
_A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
_A = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 315
|
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
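        # Each example is normalised to zero mean and unit variance; when an attention mask is given,
        # only the unpadded positions contribute to the statistics and the padded tail is set to padding_value.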
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 315
| 1
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
A : Optional[int] = logging.get_logger(__name__)
A : Dict[Optional[str], Type[Formatter]] = {}
A : Dict[Optional[str], str] = {}
A : Dict[Optional[str], Exception] = {}
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , ):
'''simple docstring'''
__lowerCAmelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
__lowerCAmelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
__lowerCAmelCase = format_type
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None ):
'''simple docstring'''
__lowerCAmelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCAmelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
A : Optional[Any] = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
A : Tuple = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
A : Any = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _lowerCamelCase ( _UpperCamelCase , **_UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = get_format_type_from_alias(_UpperCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 259
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if is_torch_version("<" , "2.0.0" ) or not hasattr(_UpperCamelCase , "_dynamo" ):
return False
return isinstance(_UpperCamelCase , torch._dynamo.eval_frame.OptimizedModule )
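# True only when the model has been wrapped by torch.compile (torch >= 2.0), i.e. it is an
# OptimizedModule produced by TorchDynamo; older torch versions short-circuit to False above.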
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = True ):
'''simple docstring'''
__lowerCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase = is_compiled_module(_UpperCamelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase = getattr(_UpperCamelCase , "forward" )
__lowerCAmelCase = model.__dict__.pop("_original_forward" , _UpperCamelCase )
if original_forward is not None:
while hasattr(_UpperCamelCase , "__wrapped__" ):
__lowerCAmelCase = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase = forward
if getattr(_UpperCamelCase , "_converted_to_transformer_engine" , _UpperCamelCase ):
convert_model(_UpperCamelCase , to_transformer_engine=_UpperCamelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = compiled_model
return model
def _lowerCamelCase ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_UpperCamelCase , _UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(_UpperCamelCase , _UpperCamelCase )
@contextmanager
def _lowerCamelCase ( **_UpperCamelCase ):
'''simple docstring'''
for key, value in kwargs.items():
__lowerCAmelCase = str(_UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not hasattr(_UpperCamelCase , "__qualname__" ) and not hasattr(_UpperCamelCase , "__name__" ):
__lowerCAmelCase = getattr(_UpperCamelCase , "__class__" , _UpperCamelCase )
if hasattr(_UpperCamelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(_UpperCamelCase , "__name__" ):
return obj.__name__
return str(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for key, value in source.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = destination.setdefault(_UpperCamelCase , {} )
merge_dicts(_UpperCamelCase , _UpperCamelCase )
else:
__lowerCAmelCase = value
return destination
def _lowerCamelCase ( _UpperCamelCase = None ):
'''simple docstring'''
if port is None:
__lowerCAmelCase = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 259
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
def a_ ( ):
UpperCAmelCase__ = Node(1 )
UpperCAmelCase__ = Node(2 )
UpperCAmelCase__ = Node(3 )
UpperCAmelCase__ = Node(4 )
UpperCAmelCase__ = Node(5 )
return tree
def a_ ( lowerCamelCase ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a_ ( lowerCamelCase ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
if root is None:
return output
UpperCAmelCase__ = deque([root] )
while process_queue:
UpperCAmelCase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase ):
if root is None:
return []
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = height(lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 0
return output
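# The flag above alternates between 0 and 1, so successive levels are emitted left-to-right and then
# right-to-left, which is what produces the zigzag (spiral) traversal order.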
def a_ ( ): # Main function for testing.
UpperCAmelCase__ = make_tree()
print(f'''In-order Traversal: {inorder(lowerCamelCase )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCamelCase )}''' )
print(f'''Post-order Traversal: {postorder(lowerCamelCase )}''' , '\n' )
print(f'''Height of Tree: {height(lowerCamelCase )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(lowerCamelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(lowerCamelCase ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(lowerCamelCase , level=lowerCamelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 98
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __lowercase ( _A ) -> List[Tuple[int, ...]]:
SCREAMING_SNAKE_CASE : Optional[int] = []
if isinstance(_A , _A ):
for v in tree.values():
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def __lowercase ( _A , _A ) -> Tuple[int, ...]:
SCREAMING_SNAKE_CASE : List[Any] = []
for d in reversed(_A ):
idx.append(flat_idx % d )
SCREAMING_SNAKE_CASE : Tuple = flat_idx // d
return tuple(reversed(_A ) )
@torch.jit.ignore
def __lowercase ( _A , _A , _A , _A = None , _A = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(_A ) -> None:
SCREAMING_SNAKE_CASE : int = True
for i in range(len(_A ) ):
SCREAMING_SNAKE_CASE : Dict = -1 * (i + 1)
l[reversed_idx] &= tally
SCREAMING_SNAKE_CASE : Any = l[reversed_idx]
if start_edges is None:
SCREAMING_SNAKE_CASE : Tuple = [s == 0 for s in start]
reduce_edge_list(_A )
if end_edges is None:
SCREAMING_SNAKE_CASE : Tuple = [e == (d - 1) for e, d in zip(_A , _A )]
reduce_edge_list(_A )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_A ) == 0:
return [()]
elif len(_A ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
SCREAMING_SNAKE_CASE : List[Tuple[slice, ...]] = []
SCREAMING_SNAKE_CASE : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_A , _A ):
if s == e:
path_list.append(slice(_A , s + 1 ) )
else:
break
SCREAMING_SNAKE_CASE : Tuple[slice, ...] = tuple(_A )
SCREAMING_SNAKE_CASE : List[str] = len(_A )
# start == end, and we're done
if divergence_idx == len(_A ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : List[str] = start[divergence_idx]
return tuple(
path + (slice(_A , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : Tuple = end[divergence_idx]
return tuple(
path + (slice(_A , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
SCREAMING_SNAKE_CASE : int = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __lowercase ( _A , _A , _A , _A ) -> torch.Tensor:
SCREAMING_SNAKE_CASE : Tuple = t.shape[:no_batch_dims]
SCREAMING_SNAKE_CASE : Union[str, Any] = list(_flat_idx_to_idx(_A , _A ) )
# _get_minimal_slice_set is inclusive
SCREAMING_SNAKE_CASE : Any = list(_flat_idx_to_idx(flat_end - 1 , _A ) )
# Get an ordered list of slices to perform
SCREAMING_SNAKE_CASE : List[Any] = _get_minimal_slice_set(
_A , _A , _A , )
SCREAMING_SNAKE_CASE : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __lowercase ( _A , _A , _A , _A , _A = False , _A = None , _A = False , ) -> Any:
if not (len(_A ) > 0):
raise ValueError("""Must provide at least one input""" )
SCREAMING_SNAKE_CASE : Tuple = [shape[:no_batch_dims] for shape in _fetch_dims(_A )]
SCREAMING_SNAKE_CASE : str = tuple([max(_A ) for s in zip(*_A )] )
def _prep_inputs(_A ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
SCREAMING_SNAKE_CASE : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
SCREAMING_SNAKE_CASE : Union[str, Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_prep_inputs , _A )
SCREAMING_SNAKE_CASE : Optional[int] = None
if _out is not None:
SCREAMING_SNAKE_CASE : Optional[int] = tensor_tree_map(lambda _A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
SCREAMING_SNAKE_CASE : Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
SCREAMING_SNAKE_CASE : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
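    # Ceil division: the number of chunks needed to cover the flattened batch dimension.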
def _select_chunk(_A ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = prepped_outputs
for _ in range(_A ):
# Chunk the input
if not low_mem:
SCREAMING_SNAKE_CASE : int = _select_chunk
else:
SCREAMING_SNAKE_CASE : Optional[int] = partial(
_chunk_slice , flat_start=_A , flat_end=min(_A , i + chunk_size ) , no_batch_dims=len(_A ) , )
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_A , _A )
# Run the layer on the chunk
SCREAMING_SNAKE_CASE : Tuple = layer(**_A )
# Allocate space for the output
if out is None:
SCREAMING_SNAKE_CASE : List[str] = tensor_tree_map(lambda _A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _A )
# Put the chunk in its pre-allocated space
if isinstance(_A , _A ):
def assign(_A , _A ) -> None:
for k, v in da.items():
if isinstance(_A , _A ):
assign(_A , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
SCREAMING_SNAKE_CASE : Optional[Any] = da[k]
assign(_A , _A )
elif isinstance(_A , _A ):
for xa, xa in zip(_A , _A ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
SCREAMING_SNAKE_CASE : str = xa
elif isinstance(_A , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
SCREAMING_SNAKE_CASE : List[Any] = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
SCREAMING_SNAKE_CASE : Any = tensor_tree_map(lambda _A : t.view(orig_batch_dims + t.shape[1:] ) , _A )
return out
class a__ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 5_1_2 , ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = max_chunk_size
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[tuple] = None
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int ) ->int:
"""simple docstring"""
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
SCREAMING_SNAKE_CASE : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
SCREAMING_SNAKE_CASE : Dict = [c for c in candidates if c > min_chunk_size]
SCREAMING_SNAKE_CASE : List[str] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase__ : int ) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase__ , chunk_size=UpperCAmelCase__ )
return True
except RuntimeError:
return False
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ ) - 1
while i > min_viable_chunk_size_index:
SCREAMING_SNAKE_CASE : int = test_chunk_size(candidates[i] )
if not viable:
SCREAMING_SNAKE_CASE : Tuple = (min_viable_chunk_size_index + i) // 2
else:
SCREAMING_SNAKE_CASE : List[str] = i
SCREAMING_SNAKE_CASE : List[str] = (i + len(UpperCAmelCase__ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
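        # The loop above is a binary search over the power-of-two candidates for the largest chunk size
        # whose trial forward pass does not raise a RuntimeError (typically an out-of-memory error).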
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Iterable , UpperCAmelCase__ : Iterable ) ->bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = True
for aa, aa in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
assert type(UpperCAmelCase__ ) == type(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , (list, tuple) ):
consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase__ : x[0] )]
SCREAMING_SNAKE_CASE : List[str] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase__ : x[0] )]
consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ )
else:
consistent &= aa == aa
return consistent
def _lowercase ( self : List[str] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int , ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : tuple = tree_map(lambda UpperCAmelCase__ : a.shape if isinstance(UpperCAmelCase__ , torch.Tensor ) else a , UpperCAmelCase__ , UpperCAmelCase__ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase__ )
else:
# Otherwise, we can reuse the precomputed value
SCREAMING_SNAKE_CASE : List[Any] = False
if not consistent:
SCREAMING_SNAKE_CASE : List[Any] = self._determine_favorable_chunk_size(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 245
| 0
|
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 185
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    def forward(self):
        # implemented only because pytorch-lightning expects a forward method
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 185
| 1
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = old_name.split('''.''' )
if layer == "0":
SCREAMING_SNAKE_CASE__ = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
SCREAMING_SNAKE_CASE__ = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
SCREAMING_SNAKE_CASE__ = old_name.replace('''3''' , '''convolution2''' )
else:
SCREAMING_SNAKE_CASE__ = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _A ):
SCREAMING_SNAKE_CASE__ = R'''\b\d{2}\b'''
if bool(re.search(_A , _A ) ):
SCREAMING_SNAKE_CASE__ = re.search(R'''\d\.\d\d.''' , _A ).group()
else:
SCREAMING_SNAKE_CASE__ = re.search(R'''\d\.\d.''' , _A ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE__ = old_name.replace(_A , '''''' )
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
SCREAMING_SNAKE_CASE__ = '''intermediate_stages.''' + trimmed_name
else:
SCREAMING_SNAKE_CASE__ = old_name.replace(_A , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
SCREAMING_SNAKE_CASE__ = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE__ = trimmed_name.replace('''fc2''' , '''linear_out''' )
SCREAMING_SNAKE_CASE__ = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _A ):
SCREAMING_SNAKE_CASE__ = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
SCREAMING_SNAKE_CASE__ = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE__ = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE__ = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
SCREAMING_SNAKE_CASE__ = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE__ = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
SCREAMING_SNAKE_CASE__ = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE__ = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE__ = new_name.replace('''norm''' , '''layernorm''' )
SCREAMING_SNAKE_CASE__ = '''efficientformer.''' + new_name
else:
SCREAMING_SNAKE_CASE__ = '''efficientformer.encoder.''' + new_name
return new_name
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE__ = checkpoint.pop(_A )
SCREAMING_SNAKE_CASE__ = val
return checkpoint
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
return image
def UpperCAmelCase_ ( _A , _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location='''cpu''' )['''model''']
SCREAMING_SNAKE_CASE__ = EfficientFormerConfig.from_json_file(_A )
SCREAMING_SNAKE_CASE__ = EfficientFormerForImageClassificationWithTeacher(_A )
SCREAMING_SNAKE_CASE__ = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
SCREAMING_SNAKE_CASE__ = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE__ = convert_torch_checkpoint(_A , _A )
model.load_state_dict(_A )
model.eval()
SCREAMING_SNAKE_CASE__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = 2_56
SCREAMING_SNAKE_CASE__ = 2_24
SCREAMING_SNAKE_CASE__ = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
SCREAMING_SNAKE_CASE__ = processor(images=_A , return_tensors='''pt''' ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE__ = Compose(
[
Resize(_A , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_A ),
ToTensor(),
Normalize(_A , _A ),
] )
SCREAMING_SNAKE_CASE__ = image_transforms(_A ).unsqueeze(0 )
assert torch.allclose(_A , _A )
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = outputs.logits
SCREAMING_SNAKE_CASE__ = (1, 10_00)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _A , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _A , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_A )
print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_A , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_A , )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=False ) -> Optional[int]:
if not batched:
SCREAMING_SNAKE_CASE__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.size
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE__ = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE__ = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
def lowercase_ ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
def lowercase_ ( self : int ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Tuple ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : Dict ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self : List[str] ) -> Optional[Any]:
# Initialize image_processings
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = self.image_processing_class(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase , do_rescale=__lowerCamelCase )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE__ = image_processing_a.pad(__lowerCamelCase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ = image_processing_a(__lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
@slow
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
SCREAMING_SNAKE_CASE__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE__ = YolosImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
| 314
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A_ : Dict =logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[List, PIL.Image.Image, torch.Tensor] )-> List[Any]:
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , snake_case , )
if isinstance(snake_case , torch.Tensor ):
return image
elif isinstance(snake_case , PIL.Image.Image ):
_lowerCamelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCamelCase , _lowerCamelCase = image[0].size
_lowerCamelCase , _lowerCamelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_lowerCamelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCamelCase = np.concatenate(snake_case , axis=0 )
_lowerCamelCase = np.array(snake_case ).astype(np.floataa ) / 2_5_5.0
_lowerCamelCase = image.transpose(0 , 3 , 1 , 2 )
_lowerCamelCase = 2.0 * image - 1.0
_lowerCamelCase = torch.from_numpy(snake_case )
elif isinstance(image[0] , torch.Tensor ):
_lowerCamelCase = torch.cat(snake_case , dim=0 )
return image
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[List, PIL.Image.Image, torch.Tensor] )-> List[Any]:
if isinstance(snake_case , torch.Tensor ):
return mask
elif isinstance(snake_case , PIL.Image.Image ):
_lowerCamelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_lowerCamelCase , _lowerCamelCase = mask[0].size
_lowerCamelCase , _lowerCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_lowerCamelCase = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
_lowerCamelCase = np.concatenate(snake_case , axis=0 )
_lowerCamelCase = mask.astype(np.floataa ) / 2_5_5.0
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = torch.from_numpy(snake_case )
elif isinstance(mask[0] , torch.Tensor ):
_lowerCamelCase = torch.cat(snake_case , dim=0 )
return mask
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : UNetaDModel
SCREAMING_SNAKE_CASE__ : RePaintScheduler
def __init__( self , a__ , a__ ):
super().__init__()
self.register_modules(unet=a__ , scheduler=a__ )
@torch.no_grad()
def __call__( self , a__ , a__ , a__ = 2_50 , a__ = 0.0 , a__ = 10 , a__ = 10 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCamelCase = image
_lowerCamelCase = _preprocess_image(a__ )
_lowerCamelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCamelCase = _preprocess_mask(a__ )
_lowerCamelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCamelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(a__ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCamelCase = original_image.shape
_lowerCamelCase = randn_tensor(a__ , generator=a__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a__ , a__ , a__ , self.device )
_lowerCamelCase = eta
_lowerCamelCase = self.scheduler.timesteps[0] + 1
_lowerCamelCase = generator[0] if isinstance(a__ , a__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_lowerCamelCase = self.unet(a__ , a__ ).sample
# compute previous image: x_t -> x_t-1
_lowerCamelCase = self.scheduler.step(a__ , a__ , a__ , a__ , a__ , a__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_lowerCamelCase = self.scheduler.undo_step(a__ , a__ , a__ )
_lowerCamelCase = t
_lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 80
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='accelerate command helpers' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 80
| 1
|
'''simple docstring'''
def greatest_common_divisor(a: int , b: int ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative(x: int , y: int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main() -> None:
    try:
        nums = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a , num_b )}''' )
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 41
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowercase__ ( snake_case_ :ndarray ):
return np.dot(snake_case_ , snake_case_ )
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , *,
_lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ):
__UpperCAmelCase = regularization
__UpperCAmelCase = gamma
if kernel == "linear":
__UpperCAmelCase = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
__UpperCAmelCase = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__UpperCAmelCase = F'''Unknown kernel: {kernel}'''
raise ValueError(_lowercase )
def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ):
return np.dot(_lowercase , _lowercase )
def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ):
__UpperCAmelCase = observations
__UpperCAmelCase = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__UpperCAmelCase) , ) = np.shape(_lowercase )
def to_minimize(_lowercase : ndarray ) -> float:
__UpperCAmelCase = 0
((__UpperCAmelCase) , ) = np.shape(_lowercase )
for i in range(_lowercase ):
for j in range(_lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowercase )
__UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 )
__UpperCAmelCase = Bounds(0 , self.regularization )
__UpperCAmelCase = minimize(
_lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x
__UpperCAmelCase = l_star
# calculating mean offset of separation plane to points
__UpperCAmelCase = 0
for i in range(_lowercase ):
for j in range(_lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__UpperCAmelCase = s / n
def a ( self : List[Any] , _lowercase : ndarray ):
__UpperCAmelCase = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332
| 0
|
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str , limit: int = 1 , age: str = "new" , wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg )
    response = requests.get(
        F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 214
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__a: Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline( Pipeline ):
'''simple docstring'''
def __init__( self , **__lowerCAmelCase ) -> int:
super().__init__(**__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
return result
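# Hypothetical usage sketch (my addition, not part of this module). In practice the
# pipeline is built through `transformers.pipeline`; the checkpoint name and image URL
# below are assumptions, and any CLIP-style zero-shot checkpoint would work the same way.
if __name__ == "__main__":
    from transformers import pipeline
    classifier = pipeline("zero-shot-image-classification" , model="openai/clip-vit-base-patch32" )
    print(classifier("http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["two cats", "a dog", "an airplane"] ) )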
| 214
| 1
|
def print_pascal_triangle(num_rows: int ) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] ,end=""" """ )
            else:
                print(triangle[row_idx][col_idx] ,end="""""" )
        print()
def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows ,int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle ,current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list[list[int]] ,current_row_idx: int ) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 ,current_row_idx ):
        calculate_current_element(
            triangle ,current_row ,current_row_idx ,current_col_idx )
    return current_row
def calculate_current_element(triangle: list[list[int]] ,current_row: list[int] ,current_row_idx: int ,current_col_idx: int ,) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows ,int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )
    result = [[1]]
    for row_index in range(1 ,num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length ,2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable ,value: int ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' ,setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'{call:38} -- {timing:.4f} seconds' )
    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func ,value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
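# Illustrative self-checks (my addition, not part of the original module): both
# generators should agree and reproduce the well-known first rows of the triangle.
assert generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]
assert generate_pascal_triangle_optimized(5) == generate_pascal_triangle(5)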
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_altclip'''] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> Any:
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def lowercase__ ( self : Dict ) -> int:
self.resolver.convert_models(["""heb-eng"""] )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
        content , mmeta = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 220
|
'''simple docstring'''
def solution(n: int = 4_00_00_00 ) -> int:
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 220
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ (metaclass=DummyObject ):
'''simple docstring'''
lowerCamelCase_ : Tuple = ["""onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
requires_backends(self , ["onnx"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
requires_backends(cls , ["onnx"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
requires_backends(cls , ["onnx"] )
| 48
|
def method_1(boundary ,steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a ,b ,h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points(a ,b ,h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary ,steps )
    print(f'''y = {y}''' )
if __name__ == "__main__":
main()
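# Rough numerical check, added as illustration: with f(x) = x**2 the exact integral over
# [0, 1] is 1/3, and ten trapezoidal steps should land close to that value.
assert abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 0.01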
| 48
| 1
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A : Any = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Any , **SCREAMING_SNAKE_CASE : str):
super().__init__(**SCREAMING_SNAKE_CASE)
requires_backends(self , 'vision')
requires_backends(self , 'torch')
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
self.check_model_type(SCREAMING_SNAKE_CASE)
def A ( self : int , **SCREAMING_SNAKE_CASE : Optional[int]):
_A : List[str] = {}
_A : Optional[int] = {}
_A : List[Any] = {}
# preprocess args
if "points_per_batch" in kwargs:
_A : Any = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
_A : List[str] = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
_A : Union[str, Any] = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
_A : Dict = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
_A : Tuple = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
_A : Optional[int] = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
_A : Any = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
_A : Optional[int] = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
_A : List[Any] = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
_A : Optional[int] = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
_A : Any = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
_A : List[str] = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE : Any , *SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , **SCREAMING_SNAKE_CASE : Union[str, Any]):
return super().__call__(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , num_workers=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
def A ( self : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=64 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : float = 512 / 1500 , SCREAMING_SNAKE_CASE : Optional[int] = 32 , SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
_A : Any = load_image(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = self.image_processor.size['longest_edge']
_A , _A , _A , _A : Optional[int] = self.image_processor.generate_crop_boxes(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt')
with self.device_placement():
if self.framework == "pt":
_A : Optional[Any] = self.get_inference_context()
with inference_context():
_A : List[str] = self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE , device=self.device)
_A : int = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
_A : List[Any] = image_embeddings
_A : List[str] = grid_points.shape[1]
_A : Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None')
for i in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_A : Optional[Any] = grid_points[:, i : i + points_per_batch, :, :]
_A : Tuple = input_labels[:, i : i + points_per_batch]
_A : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def A ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=0.88 , SCREAMING_SNAKE_CASE : Any=0.95 , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : Any=1 , ):
_A : Optional[Any] = model_inputs.pop('input_boxes')
_A : Tuple = model_inputs.pop('is_last')
_A : List[Any] = model_inputs.pop('original_sizes').tolist()
_A : int = model_inputs.pop('reshaped_input_sizes').tolist()
_A : Dict = self.model(**SCREAMING_SNAKE_CASE)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_A : Optional[Any] = model_outputs['pred_masks']
_A : Union[str, Any] = self.image_processor.post_process_masks(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , binarize=SCREAMING_SNAKE_CASE)
_A : Tuple = model_outputs['iou_scores']
_A , _A , _A : List[str] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def A ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : List[str]=0.7 , ):
_A : str = []
_A : Dict = []
_A : Tuple = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores'))
all_masks.extend(model_output.pop('masks'))
all_boxes.append(model_output.pop('boxes'))
_A : int = torch.cat(SCREAMING_SNAKE_CASE)
_A : Tuple = torch.cat(SCREAMING_SNAKE_CASE)
_A , _A , _A , _A : List[Any] = self.image_processor.post_process_for_mask_generation(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Tuple = defaultdict(SCREAMING_SNAKE_CASE)
for output in model_outputs:
for k, v in output.items():
extra[k].append(SCREAMING_SNAKE_CASE)
_A : Union[str, Any] = {}
if output_rle_mask:
_A : str = rle_mask
if output_bboxes_mask:
_A : List[Any] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 227
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A : int = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : List[str] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE : int="<pad>" , SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE : Tuple="<unk>" , SCREAMING_SNAKE_CASE : List[Any]=None , ):
_A , _A , _A , _A : Any = bos, unk, pad, eos
_A : Optional[Any] = []
_A : Optional[Any] = []
_A : Optional[int] = {}
_A : Dict = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : List[str] = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : str = self.add_symbol(SCREAMING_SNAKE_CASE)
_A : Any = self.add_symbol(SCREAMING_SNAKE_CASE)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE)
_A : List[str] = len(self.symbols)
def __eq__( self : int , SCREAMING_SNAKE_CASE : Optional[Any]):
return self.indices == other.indices
def __getitem__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__( self : Union[str, Any]):
return len(self.symbols)
def __contains__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]):
return sym in self.indices
@classmethod
def A ( cls : Dict , SCREAMING_SNAKE_CASE : Optional[Any]):
_A : Any = cls()
d.add_from_file(SCREAMING_SNAKE_CASE)
return d
def A ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=1 , SCREAMING_SNAKE_CASE : int=False):
if word in self.indices and not overwrite:
_A : str = self.indices[word]
_A : List[str] = self.count[idx] + n
return idx
else:
_A : Optional[Any] = len(self.symbols)
_A : Union[str, Any] = idx
self.symbols.append(SCREAMING_SNAKE_CASE)
self.count.append(SCREAMING_SNAKE_CASE)
return idx
def A ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int]):
return 0
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
try:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8') as fd:
self.add_from_file(SCREAMING_SNAKE_CASE)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE))
return
_A : Union[str, Any] = f.readlines()
_A : Any = self._load_meta(SCREAMING_SNAKE_CASE)
for line in lines[indices_start_line:]:
try:
_A , _A : List[str] = line.rstrip().rsplit(' ' , 1)
if field == "#fairseq:overwrite":
_A : int = True
_A , _A : List[str] = line.rsplit(' ' , 1)
else:
_A : Union[str, Any] = False
_A : List[str] = int(SCREAMING_SNAKE_CASE)
_A : Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE))
self.add_symbol(SCREAMING_SNAKE_CASE , n=SCREAMING_SNAKE_CASE , overwrite=SCREAMING_SNAKE_CASE)
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def lowerCAmelCase__ ( lowerCamelCase : Optional[int] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_A : Union[str, Any] = dict((re.sub(R'@@$' ,'' ,lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' ,'</w>' ,lowerCamelCase ), v) for k, v in d.items() )
_A : Optional[Any] = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
_A : str = d[k] # restore
return da
def lowerCAmelCase__ ( lowerCamelCase : List[str] ,lowerCamelCase : List[str] ):
# prep
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
_A : Dict = os.path.join(lowerCamelCase ,'checkpoint.pt' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
_A : int = torch.load(lowerCamelCase ,map_location='cpu' )
_A : Dict = chkpt['cfg']['model']
# dicts
_A : Any = os.path.join(lowerCamelCase ,'dict.txt' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
_A : Any = Dictionary.load(lowerCamelCase )
_A : Optional[int] = rewrite_dict_keys(src_dict.indices )
_A : List[Any] = len(lowerCamelCase )
_A : str = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# merges_file (bpecodes)
_A : Optional[int] = os.path.join(lowerCamelCase ,'bpecodes' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
_A : Dict = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowerCamelCase ,lowerCamelCase )
# model config
_A : str = os.path.join(lowerCamelCase ,'config.json' )
_A : int = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# tokenizer config
_A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase )
_A : Any = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(lowerCamelCase ,ensure_ascii=lowerCamelCase ,indent=lowerCamelCase ) )
# model
_A : List[Any] = chkpt['model']
# remove unneeded keys
_A : int = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase ,lowerCamelCase )
_A : Any = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_A : str = model_state_dict.pop(lowerCamelCase )
else:
_A : Dict = model_state_dict.pop(lowerCamelCase )
_A : Any = BioGptConfig.from_pretrained(lowerCamelCase )
_A : Union[str, Any] = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
_A : Union[str, Any] = os.path.join(lowerCamelCase ,lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase ,lowerCamelCase )
print('Conversion is done!' )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A : int = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 227
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_lowerCamelCase : Tuple = pd.read_csv('sample_data.csv', header=None)
_lowerCamelCase : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
_lowerCamelCase : Optional[int] = df.iloc[:, 1:2]
_lowerCamelCase : Any = actual_data.values.reshape(len_data, 1)
_lowerCamelCase : str = MinMaxScaler().fit_transform(actual_data)
_lowerCamelCase : Any = 10
_lowerCamelCase : Dict = 5
_lowerCamelCase : List[str] = 20
_lowerCamelCase : Optional[Any] = len_data - periods * look_back
_lowerCamelCase : Dict = actual_data[:division]
_lowerCamelCase : Optional[Any] = actual_data[division - look_back :]
_lowerCamelCase , _lowerCamelCase : str = [], []
_lowerCamelCase , _lowerCamelCase : Optional[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_lowerCamelCase : List[Any] = np.array(train_x)
_lowerCamelCase : Dict = np.array(test_x)
_lowerCamelCase : Any = np.array([list(i.ravel()) for i in train_y])
_lowerCamelCase : Union[str, Any] = np.array([list(i.ravel()) for i in test_y])
_lowerCamelCase : Tuple = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
_lowerCamelCase : Dict = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_lowerCamelCase : Union[str, Any] = model.predict(x_test)
| 258
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ) -> None:
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection: list , index: int ) -> None:
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
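# A tiny illustrative check (my addition, not part of the original snippet): sorting a
# short list in place should leave it in ascending order.
_demo = [5, 3, 1, 4, 2]
rec_insertion_sort(_demo, len(_demo))
assert _demo == [1, 2, 3, 4, 5]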
| 258
| 1
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=30, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Any = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : List[str] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ViTMSNModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.type_sequence_label_size
_lowerCAmelCase : Optional[int] = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[str] = model(__a, labels=__a)
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
print("Labels: {labels}")
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Tuple = ViTMSNForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : Optional[int] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ViTMSNModelTester(self)
_lowerCAmelCase : Optional[int] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(__a)
_lowerCAmelCase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : Any = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(_lowerCAmelCase)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(2)
_lowerCAmelCase : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(__a)
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**__a)
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : str = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
| 300
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
        _lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        _lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=False, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 300
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__snake_case = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _A ( SCREAMING_SNAKE_CASE__ : int ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if args.student_type == "roberta":
UpperCamelCase :Optional[Any] = False
elif args.student_type == "gpt2":
UpperCamelCase :List[str] = False
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
if args.student_type == "roberta":
UpperCamelCase :List[str] = False
def _A ( ):
UpperCamelCase :List[str] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
        '''--student_type''' , type=SCREAMING_SNAKE_CASE__ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=SCREAMING_SNAKE_CASE__ , help='''The student type (DistilBERT, RoBERTa, GPT2).''' , )
parser.add_argument('''--student_config''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=SCREAMING_SNAKE_CASE__ , help='''Teacher type (BERT, RoBERTa, GPT2).''' )
parser.add_argument('''--teacher_name''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=SCREAMING_SNAKE_CASE__ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=SCREAMING_SNAKE_CASE__ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=SCREAMING_SNAKE_CASE__ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=SCREAMING_SNAKE_CASE__ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=SCREAMING_SNAKE_CASE__ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE__ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=SCREAMING_SNAKE_CASE__ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=SCREAMING_SNAKE_CASE__ , default=4000 , help='''Checkpoint interval.''' )
UpperCamelCase :int = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__ )
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__ )
set_seed(SCREAMING_SNAKE_CASE__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , indent=4 )
git_log(args.dump_path )
UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = MODEL_CLASSES[args.student_type]
UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCamelCase :List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCamelCase :Dict = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCamelCase :Tuple = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
UpperCamelCase :Tuple = special_tok_ids
UpperCamelCase :List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
UpperCamelCase :Tuple = pickle.load(SCREAMING_SNAKE_CASE__ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
UpperCamelCase :Optional[Any] = pickle.load(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = np.maximum(SCREAMING_SNAKE_CASE__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase :Optional[Any] = 0.0 # do not predict special tokens
UpperCamelCase :Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :int = None
UpperCamelCase :Dict = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
UpperCamelCase :List[str] = student_config_class.from_pretrained(args.student_config )
UpperCamelCase :Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
UpperCamelCase :Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :Any = student_model_class(SCREAMING_SNAKE_CASE__ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCamelCase :Dict = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase :Optional[Any] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
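# Illustrative invocation sketch (not part of the source): every flag below is defined in the
# argparse setup above, but the script name, paths and checkpoint names are placeholders to
# substitute for a real run.
#
#   python train.py \
#       --force \
#       --dump_path serialization_dir/my_distillation_run \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --n_epoch 3 --batch_size 5 --gradient_accumulation_steps 50 \
#       --n_gpu 1 --seed 56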
| 259
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=() , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]="no" , SCREAMING_SNAKE_CASE__ : Dict="29500" ):
UpperCamelCase :List[Any] = False
UpperCamelCase :Tuple = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
UpperCamelCase :Dict = True
elif "IPython" in sys.modules:
UpperCamelCase :int = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
UpperCamelCase :Any = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , SCREAMING_SNAKE_CASE__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
UpperCamelCase :Tuple = 8
UpperCamelCase :Optional[int] = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , distributed_type='''TPU''' )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*SCREAMING_SNAKE_CASE__ )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=SCREAMING_SNAKE_CASE__ , master_addr='''127.0.0.1''' , master_port=SCREAMING_SNAKE_CASE__ , mixed_precision=SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Optional[Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , distributed_type='''MULTI_GPU''' )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase :Any = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=() , SCREAMING_SNAKE_CASE__ : int=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=SCREAMING_SNAKE_CASE__ , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
UpperCamelCase :Optional[int] = PrepareForLaunch(SCREAMING_SNAKE_CASE__ , debug=SCREAMING_SNAKE_CASE__ )
start_processes(SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , nprocs=SCREAMING_SNAKE_CASE__ , start_method='''fork''' )
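# Usage sketch (illustrative, not part of this module): in the Accelerate library the two helpers
# above correspond to `notebook_launcher` and `debug_launcher`. From a notebook one would
# typically write something like the following, where `training_function` and its arguments are
# user-defined placeholders:
#
#   from accelerate import notebook_launcher
#
#   def training_function(mixed_precision):
#       ...  # build the Accelerator, model and dataloaders, then train
#
#   notebook_launcher(training_function, args=("fp16",), num_processes=2)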
| 259
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
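# Usage sketch (illustrative only, not part of this __init__): the pipelines re-exported above are
# typically combined with a ControlNet checkpoint; the checkpoint names below are placeholders.
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#   controlnet = ControlNetModel.from_pretrained("<controlnet-checkpoint>")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained("<sd-checkpoint>", controlnet=controlnet)
#   image = pipe("a prompt", image=conditioning_image).images[0]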
| 365
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( a_ ):
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
UpperCAmelCase = eval_examples
UpperCAmelCase = post_process_function
def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = "eval" ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase = self.get_eval_dataloader(snake_case__ )
UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
else:
UpperCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ )
return metrics
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_test_dataloader(snake_case__ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions , """predict""" )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
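# Usage sketch (illustrative, not from the source): in the original question-answering examples this
# Trainer subclass is constructed with the extra `eval_examples` and `post_process_function`
# arguments handled in `__init__` above; all names below are placeholders.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()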
| 248
| 0
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowercase (snake_case__ : Optional[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = job["""started_at"""]
lowerCAmelCase = job["""completed_at"""]
lowerCAmelCase = date_parser.parse(snake_case__ )
lowerCAmelCase = date_parser.parse(snake_case__ )
lowerCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowerCAmelCase = start
lowerCAmelCase = end
lowerCAmelCase = duration_in_min
return job_info
def lowercase (snake_case__ : str , snake_case__ : Dict=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
lowerCAmelCase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowerCAmelCase = requests.get(snake_case__ , headers=snake_case__ ).json()
lowerCAmelCase = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} )
lowerCAmelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case__ ):
lowerCAmelCase = requests.get(url + f'''&page={i + 2}''' , headers=snake_case__ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
a = parser.parse_args()
a = get_job_time(args.workflow_run_id)
a = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
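# Illustrative command line (the script path is a placeholder; `--workflow_run_id` is the only
# required argument, as defined in the argparse setup above):
#
#   python get_github_job_time.py --workflow_run_id 1234567890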
| 155
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['audio_values', 'audio_mask']
def __init__( self : Optional[int] , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Optional[Any]=[16, 16] , lowerCAmelCase : Optional[Any]=128 , lowerCAmelCase : Union[str, Any]=4_4100 , lowerCAmelCase : Any=86 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : Any , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
lowerCAmelCase = spectrogram_length
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = feature_size // self.patch_size[1]
lowerCAmelCase = n_fft
lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = padding_value
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __lowercase ( self : int , lowerCAmelCase : np.array ):
lowerCAmelCase = spectrogram(
lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = log_spec - 20.0
lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
lowerCAmelCase = audio_features[i]
lowerCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCAmelCase = {"""audio_values""": padded_audio_features}
lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 155
| 1
|
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = DownBlockaD # noqa F405
lowerCAmelCase : int = "down"
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase : str = "down"
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ : Tuple = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = AttnDownBlockaD # noqa F405
lowerCAmelCase : str = "down"
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Tuple = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase : Optional[Any] = "down"
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Tuple = super().prepare_init_args_and_inputs_for_common()
lowercase__ : Optional[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : Any = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase : Tuple = "down"
@property
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = super().prepare_init_args_and_inputs_for_common()
lowercase__ : Dict = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = SkipDownBlockaD # noqa F405
lowerCAmelCase : Optional[int] = "down"
@property
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase : Optional[int] = "down"
@property
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_snake_case )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = DownEncoderBlockaD # noqa F405
lowerCAmelCase : Optional[int] = "down"
@property
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
lowercase__ : Any = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase : Union[str, Any] = "down"
@property
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ : int = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowercase__ : str = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = UNetMidBlockaD # noqa F405
lowerCAmelCase : Optional[int] = "mid"
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : List[str] = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ : int = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase : Optional[Any] = "mid"
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = super().prepare_init_args_and_inputs_for_common()
lowercase__ : Tuple = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase : List[Any] = "mid"
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = super().prepare_init_args_and_inputs_for_common()
lowercase__ : Any = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Any = UpBlockaD # noqa F405
lowerCAmelCase : Union[str, Any] = "up"
@property
def UpperCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase : Any = "up"
@property
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase : Tuple = "up"
@property
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : Any = super().prepare_init_args_and_inputs_for_common()
lowercase__ : Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase : str = "up"
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case ,include_encoder_hidden_states=_snake_case )
def UpperCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
lowercase__ : List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowercase__ : int = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[str] = AttnUpBlockaD # noqa F405
lowerCAmelCase : List[Any] = "up"
@property
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
@unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : str = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = SkipUpBlockaD # noqa F405
lowerCAmelCase : int = "up"
@property
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
lowercase__ : int = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Any = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase : Optional[int] = "up"
@property
def UpperCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_snake_case )
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : str = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = UpDecoderBlockaD # noqa F405
lowerCAmelCase : Any = "up"
@property
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_snake_case )
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase : Union[str, Any] = "up"
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_temb=_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ : Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_snake_case )
| 302
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
| 302
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class a_ :
__lowerCAmelCase : List[str] = 4_2
__lowerCAmelCase : Dict = None
# Automatically constructed
__lowerCAmelCase : Optional[int] = """dict"""
__lowerCAmelCase : str = None
__lowerCAmelCase : List[Any] = field(default="""Translation""" , init=_UpperCAmelCase , repr=_UpperCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __UpperCamelCase ( self ):
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class a_ :
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : str = None
__lowerCAmelCase : List[Any] = None
# Automatically constructed
__lowerCAmelCase : int = """dict"""
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : int = field(default="""TranslationVariableLanguages""" , init=_UpperCAmelCase , repr=_UpperCAmelCase )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = sorted(set(self.languages ) ) if self.languages else None
_lowerCAmelCase : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def __UpperCamelCase ( self , snake_case_ ):
        lang_set = set(self.languages )
        if self.languages and set(snake_case_ ) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(snake_case_ ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in snake_case_.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
def __UpperCamelCase ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
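# Example sketch (illustrative): the flattening helper above turns a per-language dict, possibly with
# several candidate translations per language, into parallel `language`/`translation` tuples sorted
# by language code. Class and method names are obfuscated in this dump; conceptually:
#
#   {"en": "the cat", "fr": ["le chat", "la chatte"]}
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}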
| 309
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig (PretrainedConfig ):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
def __init__( self , A_=True , A_=None , A_=3 , A_=100 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=2048 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , **A_ , ) ->Any:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__lowerCAmelCase : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A_ , A_ ):
__lowerCAmelCase : int = backbone_config.get('''model_type''' )
__lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase : Any = config_class.from_dict(A_ )
# set timm attributes to None
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : List[str] = None, None, None
__lowerCAmelCase : Tuple = use_timm_backbone
__lowerCAmelCase : Optional[Any] = backbone_config
__lowerCAmelCase : List[str] = num_channels
__lowerCAmelCase : Tuple = num_queries
__lowerCAmelCase : int = d_model
__lowerCAmelCase : List[Any] = encoder_ffn_dim
__lowerCAmelCase : Optional[int] = encoder_layers
__lowerCAmelCase : List[str] = encoder_attention_heads
__lowerCAmelCase : str = decoder_ffn_dim
__lowerCAmelCase : Union[str, Any] = decoder_layers
__lowerCAmelCase : Any = decoder_attention_heads
__lowerCAmelCase : Optional[int] = dropout
__lowerCAmelCase : Any = attention_dropout
__lowerCAmelCase : Tuple = activation_dropout
__lowerCAmelCase : Optional[Any] = activation_function
__lowerCAmelCase : List[str] = init_std
__lowerCAmelCase : Tuple = init_xavier_std
__lowerCAmelCase : Any = encoder_layerdrop
__lowerCAmelCase : List[Any] = decoder_layerdrop
__lowerCAmelCase : Optional[Any] = encoder_layers
__lowerCAmelCase : Optional[Any] = auxiliary_loss
__lowerCAmelCase : Optional[Any] = position_embedding_type
__lowerCAmelCase : Tuple = backbone
__lowerCAmelCase : Any = use_pretrained_backbone
__lowerCAmelCase : int = dilation
# Hungarian matcher
__lowerCAmelCase : Dict = class_cost
__lowerCAmelCase : List[str] = bbox_cost
__lowerCAmelCase : int = giou_cost
# Loss coefficients
__lowerCAmelCase : Optional[Any] = mask_loss_coefficient
__lowerCAmelCase : Tuple = dice_loss_coefficient
__lowerCAmelCase : int = bbox_loss_coefficient
__lowerCAmelCase : List[Any] = giou_loss_coefficient
__lowerCAmelCase : int = eos_coefficient
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return self.d_model
class TableTransformerOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase__ ( self ) ->float:
'''simple docstring'''
return 1e-5
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return 12
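# Illustrative downstream usage, assuming this configuration maps to
# `TableTransformerConfig` in the transformers library (the argument values
# below are examples, not defaults taken from this file):
#
#   from transformers import TableTransformerConfig, TableTransformerForObjectDetection
#   config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
#   model = TableTransformerForObjectDetection(config)  # randomly initialised weights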
import argparse
A : int = 'docs/source/_static/js/custom.js'
def update_custom_js( version : str ) -> None:
    """simple docstring"""
    with open(A , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''
    with open(A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
A : Optional[int] = parser.parse_args()
update_custom_js(args.version)
from functools import lru_cache
def unique_prime_factors( n : int ) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( n : int ) -> int:
    """simple docstring"""
    return len(unique_prime_factors(n ) )
def equality( nums : list ) -> bool:
    """simple docstring"""
    return len(set(nums ) ) in (0, 1)
def run( n : int ) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n : int = 4 ) -> int:
    """simple docstring"""
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
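    # Quick, illustrative sanity checks of the search above (Project Euler 47),
    # using the helper names as reconstructed in this file: the first pair of
    # consecutive integers with two distinct prime factors each starts at 14
    # (2*7, 3*5), and the first such triple starts at 644.
    assert unique_prime_factors(14) == {2, 7}
    assert solution(2) == 14
    assert solution(3) == 644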
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester (unittest.TestCase ):
def __init__( self , a , a=7 , a=3 , a=18 , a=30 , a=400 , a=True , a=None , a=True , a=None , ):
lowercase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 20}
lowercase__ : int = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowercase__ : Optional[int] = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : int = num_channels
lowercase__ : Union[str, Any] = image_size
lowercase__ : Any = min_resolution
lowercase__ : Dict = max_resolution
lowercase__ : Optional[Any] = do_resize
lowercase__ : Optional[Any] = size
lowercase__ : List[Any] = do_center_crop
lowercase__ : Dict = crop_size
def snake_case_ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
def snake_case_ ( self):
lowercase__ : List[str] = MobileNetVaImageProcessingTester(self)
@property
def snake_case_ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self):
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a , 'do_resize'))
self.assertTrue(hasattr(a , 'size'))
self.assertTrue(hasattr(a , 'do_center_crop'))
self.assertTrue(hasattr(a , 'crop_size'))
def snake_case_ ( self):
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 20})
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
lowercase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
def snake_case_ ( self):
pass
def snake_case_ ( self):
# Initialize image_processing
lowercase__ : int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : int = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def snake_case_ ( self):
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Optional[int] = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def snake_case_ ( self):
# Initialize image_processing
lowercase__ : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
lowercase__ : str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args , unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
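# Illustrative invocations of this launcher (the script name, host details and
# pass-through flags below are examples only, not values defined in this file):
#
#   # On-demand cloud GPU via the provider's cheapest matching instance:
#   python run_on_remote.py --instance V100:1 --provider cheapest \
#       --example pytorch/text-generation/run_generation.py --model_type gpt2
#
#   # Bring-your-own host over SSH:
#   python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#       --example pytorch/text-classification/run_glue.py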
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins( root ):
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('''The nodes number should be same as the number of coins''' )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
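    # A minimal, illustrative check of the coin-distribution routine above.
    # The tree values are arbitrary: the right leaf must send 2 coins up and
    # the root must send 1 coin to the left leaf, so 3 moves are expected.
    #
    #        0
    #       / \
    #      0   3
    example_root = TreeNode(0, TreeNode(0), TreeNode(3))
    assert distribute_coins(example_root) == 3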
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Dict = logging.get_logger()
@dataclass
class _A :
UpperCamelCase__ : nn.Module
UpperCamelCase__ : List[nn.Module] = field(default_factory=__UpperCAmelCase )
UpperCamelCase__ : list = field(default_factory=__UpperCAmelCase )
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tensor , __SCREAMING_SNAKE_CASE : Tensor):
'''simple docstring'''
__a = len(list(m.modules())) == 1 or isinstance(__SCREAMING_SNAKE_CASE , nn.Convad) or isinstance(__SCREAMING_SNAKE_CASE , nn.BatchNormad)
if has_not_submodules:
self.traced.append(__SCREAMING_SNAKE_CASE)
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tensor):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(__SCREAMING_SNAKE_CASE)
[x.remove() for x in self.handles]
return self
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return list(filter(lambda __SCREAMING_SNAKE_CASE: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class _A :
UpperCamelCase__ : nn.Module
UpperCamelCase__ : nn.Module
UpperCamelCase__ : int = 1
UpperCamelCase__ : List = field(default_factory=__UpperCAmelCase )
UpperCamelCase__ : List = field(default_factory=__UpperCAmelCase )
UpperCamelCase__ : bool = True
def __call__( self : Any , __SCREAMING_SNAKE_CASE : Tensor):
'''simple docstring'''
__a = Tracker(self.dest)(__SCREAMING_SNAKE_CASE).parametrized
__a = Tracker(self.src)(__SCREAMING_SNAKE_CASE).parametrized
__a = list(filter(lambda __SCREAMING_SNAKE_CASE: type(__SCREAMING_SNAKE_CASE) not in self.src_skip , __SCREAMING_SNAKE_CASE))
__a = list(filter(lambda __SCREAMING_SNAKE_CASE: type(__SCREAMING_SNAKE_CASE) not in self.dest_skip , __SCREAMING_SNAKE_CASE))
if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(__SCREAMING_SNAKE_CASE)} operations while'
F' destination module has {len(__SCREAMING_SNAKE_CASE)}.')
for dest_m, src_m in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}')
class _A ( nn.Module ):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : nn.Module):
'''simple docstring'''
super().__init__()
__a = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem))
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block'''), F'Unexpected layer name {k}'
__a = len(__SCREAMING_SNAKE_CASE) + 1
feature_blocks.append((F'res{block_index}', v))
__a = nn.ModuleDict(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tensor):
'''simple docstring'''
return get_trunk_forward_outputs(
__SCREAMING_SNAKE_CASE , out_feat_keys=__SCREAMING_SNAKE_CASE , feature_blocks=self._feature_blocks , )
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = x.split('''-''')
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
def __getitem__( self : List[Any] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
if x not in self:
__a = self.convert_name_to_timm(__SCREAMING_SNAKE_CASE)
__a = partial(lambda: (timm.create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE).eval(), None))
else:
__a = super().__getitem__(__SCREAMING_SNAKE_CASE)
return val
class _A ( __UpperCAmelCase ):
def __getitem__( self : Tuple , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
__a = RegNetModel
else:
__a = RegNetForImageClassification
return val
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for from_key, to_key in keys:
__a = from_state_dict[from_key].clone()
print(f'Copied key={from_key} to={to_key}' )
return to_state_dict
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
print(f'Converting {name}...' )
with torch.no_grad():
__a , __a = from_model_func()
__a = our_model_func(_UpperCAmelCase ).eval()
__a = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase , raise_if_mismatch=_UpperCAmelCase )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCAmelCase )
if from_state_dict is not None:
__a = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__a = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
__a = manually_copy_vissl_head(_UpperCAmelCase , our_model.state_dict() , _UpperCAmelCase )
our_model.load_state_dict(_UpperCAmelCase )
__a = our_model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
__a = (
our_outputs.logits if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else our_outputs.last_hidden_state
)
__a = from_model(_UpperCAmelCase )
__a = from_output[-1] if type(_UpperCAmelCase ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__a = our_outputs.hidden_states[-1]
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_UpperCAmelCase , )
__a = 224 if '''seer''' not in name else 384
# we can use the convnext one
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_UpperCAmelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_UpperCAmelCase , )
print(f'Pushed {name}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
__a = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
__a = NameToOurModelFuncMap()
__a = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_UpperCAmelCase , _UpperCAmelCase ) -> Tuple[nn.Module, Dict]:
__a = torch.hub.load_state_dict_from_url(_UpperCAmelCase , model_dir=str(_UpperCAmelCase ) , map_location='''cpu''' )
__a = model_func()
# check if we have a head, if yes add it
__a = files['''classy_state_dict''']['''base_model''']['''model''']
__a = model_state_dict['''trunk''']
model.load_state_dict(_UpperCAmelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__a = partial(
_UpperCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_UpperCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
return config, expected_shape
if __name__ == "__main__":
__snake_case :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__snake_case :Tuple = parser.parse_args()
__snake_case :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''cls_token''': '''<s>'''}
def __UpperCAmelCase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase_ : Union[str, Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase_ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = 'lower newer'
UpperCAmelCase_ : Optional[int] = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : Optional[Any] = 'lower newer'
UpperCAmelCase_ : Optional[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('roberta-base' )
UpperCAmelCase_ : str = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = 'Encode this sequence.'
UpperCAmelCase_ : int = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
UpperCAmelCase_ : List[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
UpperCAmelCase_ : Any = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = 'Encode <mask> sequence'
UpperCAmelCase_ : Union[str, Any] = 'Encode <mask>sequence'
UpperCAmelCase_ : int = tokenizer.encode(_UpperCamelCase )
UpperCAmelCase_ : Any = encoded.index(_UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.encode(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = encoded.index(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
pass
def __UpperCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase_ : Optional[int] = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCAmelCase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ : Any = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : str = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : List[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
"""simple docstring"""
def selection_sort( collection : list ) -> list:
    """simple docstring"""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
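    # Quick illustrative checks of the selection sort above; the inputs are arbitrary.
    assert selection_sort([3, 1, 2]) == [1, 2, 3]
    assert selection_sort([]) == []
    assert selection_sort([-5, 0, 5]) == [-5, 0, 5]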
import math
import unittest
def is_prime( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> Tuple:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __A ( self : List[str] ) -> Dict:
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig ( PretrainedConfig ):
    model_type = """xmod"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig ( OnnxConfig ):
@property
def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
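# A minimal instantiation sketch for the configuration class reconstructed above;
# the language codes are illustrative and the asserts only exercise the defaults.
_example_config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
assert _example_config.adapter_reduction_factor == 2
assert _example_config.default_language == "en_XX"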
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
a = tempfile.mkdtemp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"do_convert_rgb": True,
}
a = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = self.get_image_processor()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
a = self.get_image_processor(do_normalize=lowerCamelCase_ )
a = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=lowerCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = self.prepare_image_inputs()
a = image_processor(lowerCamelCase_ , return_tensors="np" )
a = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = processor(text=lowerCamelCase_ )
a = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(lowerCamelCase_ )
a = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "Alexandra,T-shirt的价格是15便士。"
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
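# Usage sketch for the processor class exercised by the tests above (not part of the
# test suite). The checkpoint id below is an assumption; any Chinese-CLIP checkpoint
# that ships both a tokenizer and an image processor should behave the same way.
from PIL import Image

from transformers import ChineseCLIPModel, ChineseCLIPProcessor

chinese_clip_processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
chinese_clip_model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

demo_image = Image.open("pokemon.png")  # any local RGB image
demo_texts = ["杰尼龟", "妙蛙种子", "皮卡丘"]

# A single call tokenizes the Chinese text and preprocesses the image.
clip_inputs = chinese_clip_processor(text=demo_texts, images=demo_image, return_tensors="pt", padding=True)
clip_outputs = chinese_clip_model(**clip_inputs)
image_text_probs = clip_outputs.logits_per_image.softmax(dim=-1)  # similarity of the image to each caption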
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Lazy import structure: the configuration is always importable; the vision and torch
# objects are only registered when their optional dependencies are installed.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
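# Usage sketch showing what the lazy import structure above ultimately exposes
# (a minimal example; "Intel/dpt-large" is an assumed checkpoint id, any DPT checkpoint works).
import torch
from PIL import Image

from transformers import DPTForDepthEstimation, DPTImageProcessor

dpt_image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
dpt_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

dpt_inputs = dpt_image_processor(images=Image.open("room.jpg"), return_tensors="pt")
with torch.no_grad():
    predicted_depth = dpt_model(**dpt_inputs).predicted_depth  # (batch, height, width) relative depth map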
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline that answers a free-form question about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        # A single (image, question) pair is wrapped into a dict; dicts, lists of dicts,
        # generators and datasets are passed through unchanged.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
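# Usage sketch for the pipeline defined above. The checkpoint id is an assumption
# (a ViLT model fine-tuned on VQA); "visual-question-answering" is the task alias used
# to instantiate this class through `pipeline(...)`.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
vqa_answers = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
)
# -> list of {"score": float, "answer": str} dicts, best answer first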
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
a : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Dict ) ->Optional[Any]:
'''simple docstring'''
for attribute in key.split("." ):
a : Dict = getattr(_lowercase , _lowercase )
if weight_type is not None:
a : Optional[Any] = getattr(_lowercase , _lowercase ).shape
else:
a : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
a : List[str] = value
elif weight_type == "weight_g":
a : int = value
elif weight_type == "weight_v":
a : int = value
elif weight_type == "bias":
a : Tuple = value
else:
a : Union[str, Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[str] , _lowercase : int ) ->Any:
'''simple docstring'''
a : Dict = []
a : int = fairseq_model.state_dict()
a : Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a : int = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , )
a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
a : Optional[Any] = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
a : List[str] = True
if "*" in mapped_key:
a : Union[str, Any] = name.split(_lowercase )[0].split("." )[-2]
a : str = mapped_key.replace("*" , _lowercase )
if "weight_g" in name:
a : Optional[int] = "weight_g"
elif "weight_v" in name:
a : Optional[Any] = "weight_v"
elif "weight" in name:
a : Tuple = "weight"
elif "bias" in name:
a : Tuple = "bias"
else:
a : Union[str, Any] = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
a : List[Any] = full_name.split("conv_layers." )[-1]
a : Any = name.split("." )
a : List[str] = int(items[0] )
a : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
a : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
a : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
a : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowercase )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Tuple , _lowercase : List[str]=None , _lowercase : Dict=None , _lowercase : int=True ) ->List[Any]:
'''simple docstring'''
if config_path is not None:
a : Tuple = HubertConfig.from_pretrained(_lowercase )
else:
a : Any = HubertConfig()
if is_finetuned:
if dict_path:
a : str = Dictionary.load(_lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a : int = target_dict.pad_index
a : Optional[int] = target_dict.bos_index
a : Dict = target_dict.eos_index
a : Optional[int] = len(target_dict.symbols )
a : List[str] = os.path.join(_lowercase , "vocab.json" )
if not os.path.isdir(_lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
with open(_lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , _lowercase )
a : Optional[int] = Wav2Vec2CTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowercase , )
a : int = True if config.feat_extract_norm == "layer" else False
a : Any = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
a : str = Wav2Vec2Processor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
a : int = HubertForCTC(_lowercase )
else:
a : str = HubertModel(_lowercase )
if is_finetuned:
a, a, a : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a, a, a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a : Optional[int] = model[0].eval()
recursively_load_weights(_lowercase , _lowercase , _lowercase )
hf_wavavec.save_pretrained(_lowercase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
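# Sanity-check sketch: once the script above has written a fine-tuned checkpoint to
# --pytorch_dump_folder_path, it can be reloaded with the regular transformers API.
# "/path/to/dump" is a placeholder for that folder, not a real path.
from transformers import HubertForCTC, Wav2Vec2Processor

converted_processor = Wav2Vec2Processor.from_pretrained("/path/to/dump")
converted_model = HubertForCTC.from_pretrained("/path/to/dump")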
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file: Mock, sock: Mock) -> None:
    """The server should accept one connection, stream the file once, then shut everything down."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    chunks = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(chunks)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__lowerCamelCase = get_logger(__name__)
__lowerCamelCase = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class UpperCamelCase__:
@add_start_docstrings(_lowerCamelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase__:
@add_start_docstrings(_lowerCamelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase__( a__ ):
@add_start_docstrings(_lowerCamelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_lowerCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,**_lowerCamelCase )
else:
A__ = processor(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ) -> int:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
A__ = scores / self.temperature
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase = -float('Inf' ) ,__UpperCAmelCase = 1 ) -> List[Any]:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
A__ = lax.top_k(_lowerCamelCase ,scores.shape[-1] )
A__ = jnp.full_like(_lowerCamelCase ,self.filter_value )
A__ = jax.nn.softmax(_lowerCamelCase ,axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_lowerCamelCase ,1 )
score_mask |= score_mask.at[:, 0].set(_lowerCamelCase )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_lowerCamelCase )
A__ = jnp.where(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
A__ = jax.lax.sort_key_val(_lowerCamelCase ,_lowerCamelCase )[-1]
return next_scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase = -float('Inf' ) ,__UpperCAmelCase = 1 ) -> List[str]:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_lowerCamelCase ,_lowerCamelCase )
A__ = filter_value
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size ,self.filter_value )
A__ = min(self.top_k ,scores.shape[-1] ) # Safety check
A__ = lax.top_k(_lowerCamelCase ,_lowerCamelCase )
A__ = jnp.broadcast_to((jnp.arange(_lowerCamelCase ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_lowerCamelCase )
A__ = next_scores_flat.reshape(_lowerCamelCase ,_lowerCamelCase )
return next_scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ) -> Any:
A__ = bos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = jnp.full(scores.shape ,-float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_lowerCamelCase ,new_scores.at[:, self.bos_token_id].set(0 ) ,_lowerCamelCase )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = max_length
A__ = eos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = jnp.full(scores.shape ,-float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_lowerCamelCase ,new_scores.at[:, self.eos_token_id].set(0 ) ,_lowerCamelCase )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_lowerCamelCase ,_lowerCamelCase ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
A__ = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
A__ = jnp.where(_lowerCamelCase ,scores.at[:, self.eos_token_id].set(-float('inf' ) ) ,_lowerCamelCase )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
A__ = list(_lowerCamelCase )
A__ = begin_index
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_lowerCamelCase ,scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) ,_lowerCamelCase )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = list(_lowerCamelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ) -> Dict:
A__ = dict(_lowerCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_lowerCamelCase )
A__ = jnp.int32(_lowerCamelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
def _force_token(__UpperCAmelCase ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_lowerCamelCase ,dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_lowerCamelCase ,_lowerCamelCase ,(0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(_lowerCamelCase ) ,lambda: scores ,) ,)
return scores
class UpperCamelCase__( a__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_lowerCamelCase ,'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 ,_lowerCamelCase ,_lowerCamelCase )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,_lowerCamelCase ,)
A__ = jnp.where((cur_len - self.begin_index) < 2 ,_lowerCamelCase ,_lowerCamelCase )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,_lowerCamelCase ,_lowerCamelCase ,)
return jnp.where(
_lowerCamelCase ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) ,scores_k.at[: self.eos_token_id].set(-float('inf' ) ) ,) ,_lowerCamelCase ,)
A__ = jax.vmap(_lowerCamelCase )(_lowerCamelCase ,_lowerCamelCase )
A__ = jnp.where(cur_len == self.begin_index ,_lowerCamelCase ,_lowerCamelCase )
A__ = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,_lowerCamelCase ,)
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_lowerCamelCase ,scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) ,_lowerCamelCase ,)
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_lowerCamelCase ,axis=-1 )
def handle_cumulative_probs(__UpperCAmelCase ,__UpperCAmelCase ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) ,_lowerCamelCase ,)
A__ = jax.vmap(_lowerCamelCase )(_lowerCamelCase ,_lowerCamelCase )
return scores
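# Usage sketch for chaining logits processors/warpers like the ones defined above.
# The class names below are the public transformers equivalents (an assumption about
# what this file's renamed classes correspond to), not names defined in this snippet.
import jax.numpy as jnp

from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

warpers = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50), FlaxTopPLogitsWarper(top_p=0.95)]
)
dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
dummy_scores = jnp.zeros((1, 32_000))  # (batch_size, vocab_size) raw logits
warped_scores = warpers(dummy_input_ids, dummy_scores, cur_len=4)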
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = UpperCamelCase__.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , A__ ).groups()[0]
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> List[str]:
A__ = file_names
A__ = image_transform
A__ = label_to_id
def __len__( self ) -> Dict:
return len(self.file_names )
def __getitem__( self ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = self.file_names[idx]
A__ = PIL.Image.open(__UpperCAmelCase )
A__ = raw_image.convert('RGB' )
if self.image_transform is not None:
A__ = self.image_transform(__UpperCAmelCase )
A__ = extract_label(__UpperCAmelCase )
if self.label_to_id is not None:
A__ = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = config['image_size']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
A__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A__ = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A__ = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A__ = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
A__ = [extract_label(UpperCamelCase__ ) for fname in file_names]
A__ = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A__ = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A__ = np.random.permutation(len(UpperCamelCase__ ) )
A__ = int(0.8 * len(UpperCamelCase__ ) )
A__ = random_perm[:cut]
A__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A__ = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A__ = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = create_model('resnet50d' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A__ = False
for param in model.get_classifier().parameters():
A__ = True
# We normalize the batches of images to be a bit faster.
A__ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
A__ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A__ = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A__ = 0
# We also need to keep track of the starting epoch so files are named properly
A__ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A__ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A__ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A__ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A__ = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A__ = int(training_difference.replace('epoch_' , '' ) ) + 1
A__ = None
else:
A__ = int(training_difference.replace('step_' , '' ) )
A__ = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A__ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A__ = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A__ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
A__ = model(UpperCamelCase__ )
A__ = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A__ = 0
A__ = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
with torch.no_grad():
A__ = model(UpperCamelCase__ )
A__ = outputs.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['label']) )
A__ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A__ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(UpperCamelCase__ ),
'epoch': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A__ = F'''epoch_{epoch}'''
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=UpperCamelCase__ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=UpperCamelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=UpperCamelCase__ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
A__ = parser.parse_args()
A__ = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
UpperCAmelCase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def a_ ( lowerCamelCase , lowerCamelCase ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def a_ ( lowerCamelCase , lowerCamelCase ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def a_ ( lowerCamelCase ):
if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = matrix_length // 2
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )]
UpperCAmelCase__ = [
[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )
]
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )]
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def a_ ( lowerCamelCase ):
return len(lowerCamelCase ), len(matrix[0] )
def a_ ( lowerCamelCase ):
print('\n'.join(str(line ) for line in lowerCamelCase ) )
def a_ ( lowerCamelCase , lowerCamelCase ):
if matrix_dimensions(lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = split_matrix(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = split_matrix(lowerCamelCase )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = matrix_addition(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = matrix_addition(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
# construct the new matrix from our 4 quadrants
UpperCAmelCase__ = []
for i in range(len(lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def a_ ( lowerCamelCase , lowerCamelCase ):
if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]:
UpperCAmelCase__ = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(lowerCamelCase )
UpperCAmelCase__ = matrix_dimensions(lowerCamelCase )
UpperCAmelCase__ = matrix_dimensions(lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
UpperCAmelCase__ = max(*lowerCamelCase , *lowerCamelCase )
UpperCAmelCase__ = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) )
UpperCAmelCase__ = matrixa
UpperCAmelCase__ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , lowerCamelCase )
# Removing the additional zeros
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase__ : List[str] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
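# Usage sketch for the scheduler under test: a bare denoising loop with a stand-in
# "model" so the stepping order is visible. Shapes and step count are arbitrary.
import torch

from diffusers import PNDMScheduler

pndm = PNDMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
pndm.set_timesteps(num_inference_steps=50)

latent = torch.randn(1, 3, 32, 32)
for timestep in pndm.timesteps:
    noise_pred = torch.randn_like(latent)  # stand-in for a real noise-prediction model
    latent = pndm.step(noise_pred, timestep, latent).prev_sample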
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TensorFlow Funnel checkpoint into a PyTorch checkpoint."""
    # Initialise the PyTorch model from the json config
    config = FunnelConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch checkpoint plus config file."""
    # Construct the model from the json config if one is given, otherwise use the default config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = os.path.join(args.tf_model_dir , '''parameters.json''' )
A_ : Union[str, Any] = json.loads(open(_UpperCAmelCase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
A_ : Optional[Any] = args.output + '''.pt'''
A_ : Tuple = OrderedDict()
with tf.device('''/CPU:0''' ):
A_ : List[str] = tf.train.load_checkpoint(args.tf_model_dir )
A_ : Dict = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
A_ : int = reader.get_tensor(_UpperCAmelCase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
A_ : int = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
A_ : int = 8
A_ : Optional[int] = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Any = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/moe''' ):
A_ : int = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
A_ : str = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
A_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/softmlp/kernel''' ):
A_ : Any = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
A_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Dict = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
A_ : Any = key_name[-9:-7]
for i in range(16 ):
A_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
A_ : List[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A_ : Tuple = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/mlp''' ):
A_ : Optional[int] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
A_ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
A_ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Optional[int] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/p1/bias''' ):
A_ : Optional[int] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
A_ : int = vnp.copy() # same because it is one dimensional
A_ : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/p2/kernel''' ):
A_ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
A_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Optional[Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/p2/bias''' ):
A_ : Dict = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
A_ : Any = vnp.copy() # same because it is one dimensional
A_ : Dict = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/ln''' ):
A_ : Optional[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A_ : Any = '''model.blocks.%d.feed_forward.norm.bias''' % player
A_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
A_ : Any = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/g''' ):
A_ : Optional[int] = '''model.blocks.%d.feed_forward.norm.weight''' % player
A_ : List[Any] = vnp.copy() # same because it is one dimensional
A_ : Tuple = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/att''' ):
A_ : Optional[Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
A_ : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A_ : Tuple = state[:, 0, :, :]
A_ : Optional[int] = state[:, 1, :, :]
A_ : str = state[:, 2, :, :]
A_ : Optional[int] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : List[str] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : str = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
A_ : Any = torch.tensor(_UpperCAmelCase )
A_ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
A_ : str = torch.tensor(_UpperCAmelCase )
A_ : Optional[int] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
A_ : Optional[int] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/o/kernel''' ):
A_ : Optional[int] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
A_ : List[str] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A_ : Any = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/an''' ):
A_ : List[str] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A_ : Any = '''model.blocks.%d.self_attn.norm.bias''' % player
A_ : str = vnp.copy() # same because it is one dimensional
A_ : Optional[int] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('''/g''' ):
A_ : Tuple = '''model.blocks.%d.self_attn.norm.weight''' % player
A_ : Dict = vnp.copy() # same because it is one dimensional
A_ : Optional[int] = torch.tensor(_UpperCAmelCase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
A_ : Optional[int] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
A_ : Tuple = '''model.%s.weight''' % nlayer
A_ : Any = vnp.copy() # same in embedded
A_ : Dict = torch.tensor(_UpperCAmelCase )
if key_name.startswith('''model/wte''' ):
A_ : int = '''lm_head.weight'''
A_ : int = vnp.copy() # same in embedded
A_ : int = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('''model/wob''' ):
A_ : Tuple = '''final_logits_bias'''
A_ : Tuple = vnp.copy() # same in embedded
A_ : str = state.reshape((1, -1) )
A_ : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name == "model/dense/kernel":
A_ : List[Any] = '''model.last_project.weight'''
A_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A_ : Tuple = torch.tensor(_UpperCAmelCase )
elif key_name == "model/dense_1/bias":
A_ : Tuple = '''model.last_project.bias'''
A_ : List[Any] = vnp.copy() # same because it is one dimensional
A_ : Any = torch.tensor(_UpperCAmelCase )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
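# Illustrative CLI sketch (the script name and paths are assumptions, not taken from the source):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt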
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies( url = "" ):
    """simple docstring"""
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    titles = soup.find_all('''td''' , attrs='''titleColumn''' )
    ratings = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename = "IMDb_Top_250_Movies.csv" ):
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
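# Illustrative sketch of consuming the generated CSV (not part of the original script; it reuses the
# csv import above and the default filename written by write_movies):
#   with open("IMDb_Top_250_Movies.csv", newline="") as f:
#       for row in csv.reader(f):
#           print(row)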
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ) -> Any:
_a = 1
_a = 3
_a = (32, 32)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCAmelCase , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_a = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
assert image.shape[0] == 2
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_a = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type='''v_prediction''' )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_a = unet.half()
_a = text_encoder.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
_a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _UpperCAmelCase ( self ) -> Dict:
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
_a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _UpperCAmelCase ( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_a = '''stabilityai/stable-diffusion-x4-upscaler'''
_a = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a = '''a cat sitting on a park bench'''
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
_a = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
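# Illustrative usage sketch outside of the test harness (the prompt and low_res_image are placeholders;
# the model id and call pattern mirror the slow tests above):
#   pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
#   upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]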
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    """simple docstring"""
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''', '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''', '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
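# Illustrative CLI sketch (script name and paths are assumptions; --alpha is the merge ratio in W = W0 + alpha * deltaW):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path my_lora.safetensors \
#       --dump_path ./merged-pipeline --alpha 0.75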
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
def __init__( self :List[str] , lowerCamelCase :int , lowerCamelCase :Union[str, Any]=13 , lowerCamelCase :Union[str, Any]=7 , lowerCamelCase :Dict=6 , lowerCamelCase :Union[str, Any]=17 , lowerCamelCase :str=23 , lowerCamelCase :List[Any]=11 , lowerCamelCase :List[str]=True , ) -> Optional[Any]:
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = act_dim
UpperCAmelCase__ = state_dim
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = max_length
UpperCAmelCase__ = is_training
def UpperCAmelCase_ ( self :Tuple ) -> Tuple:
UpperCAmelCase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
UpperCAmelCase__ = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :int , lowerCamelCase :List[str] , lowerCamelCase :Optional[int] , lowerCamelCase :List[str] , lowerCamelCase :List[Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :Dict , ) -> List[Any]:
UpperCAmelCase__ = DecisionTransformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def UpperCAmelCase_ ( self :List[Any] ) -> Dict:
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCAmelCase_ = ()
UpperCAmelCase_ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
UpperCAmelCase_ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def UpperCAmelCase_ ( self :Optional[Any] ) -> int:
UpperCAmelCase__ = DecisionTransformerModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self :Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :Tuple ) -> int:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Optional[int] ) -> List[Any]:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = DecisionTransformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[Any] ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(lowerCamelCase )] , lowerCamelCase )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self :Any ) -> List[str]:
UpperCAmelCase__ = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ = 10 # defined by the RL environment, may be normalized
UpperCAmelCase__ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ = model.to(lowerCamelCase )
UpperCAmelCase__ = model.config
torch.manual_seed(0 )
UpperCAmelCase__ = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=lowerCamelCase )
UpperCAmelCase__ = torch.tensor(lowerCamelCase , device=lowerCamelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ = state
UpperCAmelCase__ = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase , dtype=torch.floataa )
UpperCAmelCase__ = torch.zeros(1 , 0 , device=lowerCamelCase , dtype=torch.floataa )
UpperCAmelCase__ = torch.tensor(0 , device=lowerCamelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCamelCase ):
UpperCAmelCase__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase )] , dim=1 )
UpperCAmelCase__ = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase )] , dim=1 )
UpperCAmelCase__ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = model(
states=lowerCamelCase , actions=lowerCamelCase , rewards=lowerCamelCase , returns_to_go=lowerCamelCase , timesteps=lowerCamelCase , attention_mask=lowerCamelCase , return_dict=lowerCamelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ = action_pred[0, -1]
UpperCAmelCase__ = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ = returns_to_go[0, -1] - reward
UpperCAmelCase__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCamelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
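# Sketch of the rollout pattern exercised by the integration test above (not part of the original file):
# each step appends the new state, action, reward, return-to-go and timestep to the context, re-runs the
# model, and reads the latest action prediction, e.g.
#   state_preds, action_preds, return_preds = model(states=states, actions=actions, rewards=rewards,
#       returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False)
#   action = action_preds[0, -1]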
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
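# Illustrative launch sketch (the script filename is an assumption, not taken from the source):
#   accelerate launch tracking_example.py --with_tracking --project_dir ./logs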
_lowerCAmelCase : List[str] = 1_6
_lowerCAmelCase : List[Any] = 3_2
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 ):
"""simple docstring"""
UpperCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__ = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase__ = 8
else:
UpperCAmelCase__ = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
UpperCAmelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : int = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1":
UpperCAmelCase__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ = config["lr"]
UpperCAmelCase__ = int(config["num_epochs"] )
UpperCAmelCase__ = int(config["seed"] )
UpperCAmelCase__ = int(config["batch_size"] )
set_seed(_lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase__ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
UpperCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase__ = os.path.split(_lowerCAmelCase )[-1].split("." )[0]
accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase__ = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__ = model(**_lowerCAmelCase )
UpperCAmelCase__ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
UpperCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowerCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(_lowerCAmelCase ),
"epoch": epoch,
} , step=_lowerCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs` and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : List[Any] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''' )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path, pytorch_dump_folder_path, config=None ):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
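# Illustrative CLI sketch (script name and paths are assumptions, not taken from the source):
#   python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt --pytorch_dump_folder_path ./opt-pytorch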
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple ) -> int:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def snake_case__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCamelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
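# Sketch of what the helper under test checks (not part of the original file): for every torch .bin
# weight in the listing, a matching .safetensors file must exist, optionally with a variant suffix
# such as .fp16. For example:
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                              "unet/diffusion_pytorch_model.safetensors"])  # -> True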
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class OnnxRuntimeModel:
def __init__(self : Optional[Any] , snake_case__ : Optional[Any]=None , **snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
snake_case : Optional[Any] = model
snake_case : Dict = kwargs.get("model_save_dir" , snake_case__ )
snake_case : int = kwargs.get("latest_model_name" , snake_case__ )
def __call__(self : Tuple , **snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = {k: np.array(snake_case__ ) for k, v in kwargs.items()}
return self.model.run(snake_case__ , snake_case__ )
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : Union[str, Path] , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None ) -> Any:
'''simple docstring'''
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
snake_case : Optional[int] = "CPUExecutionProvider"
return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Path] , snake_case__ : Optional[str] = None , **snake_case__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
snake_case : Any = self.model_save_dir.joinpath(self.latest_model_name )
snake_case : str = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
snake_case : List[str] = self.model_save_dir.joinpath(snake_case__ )
if src_path.exists():
snake_case : Tuple = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[int] , ) -> str:
'''simple docstring'''
if os.path.isfile(snake_case__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
# saving model weights/files
self._save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Tuple , snake_case__ : Union[str, Path] , snake_case__ : Optional[Union[bool, str, None]] = None , snake_case__ : Optional[Union[str, None]] = None , snake_case__ : bool = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional["ort.SessionOptions"] = None , **snake_case__ : Tuple , ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(snake_case__ ):
snake_case : Any = OnnxRuntimeModel.load_model(
os.path.join(snake_case__ , snake_case__ ) , provider=snake_case__ , sess_options=snake_case__ )
snake_case : Union[str, Any] = Path(snake_case__ )
# load model from hub
else:
# download model
snake_case : Dict = hf_hub_download(
repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , )
snake_case : List[Any] = Path(snake_case__ ).parent
snake_case : Union[str, Any] = Path(snake_case__ ).name
snake_case : Dict = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ )
return cls(model=snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Optional[Any] , snake_case__ : Union[str, Path] , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , **snake_case__ : Dict , ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = None
if len(str(snake_case__ ).split("@" ) ) == 2:
snake_case , snake_case : int = model_id.split("@" )
return cls._from_pretrained(
model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
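# Illustrative usage sketch (repo id, filename and input name are placeholders; the method names assume
# the obfuscated classmethods above correspond to the usual from_pretrained/save_pretrained API):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))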
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
A__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} ,)
@dataclasses.dataclass
class STDataArguments:
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "A csv or a json file containing the validation data."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "The name of the task to train on."} ,)
A__ : Optional[List[str]] = dataclasses.field(
default=A_ ,metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
A__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
A__ : Optional[str] = dataclasses.field(
default="accuracy" ,metadata={"help": "The evaluation metric used for the task."} )
    A__ : Optional[str] = dataclasses.field(
        default="no" ,metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        } ,)
A__ : Optional[int] = dataclasses.field(
default=10 ,metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} ,)
A__ : Optional[float] = dataclasses.field(
default=0.0 ,metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} ,)
A__ : Optional[float] = dataclasses.field(
default=0.0 ,metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} ,)
A__ : Optional[int] = dataclasses.field(
default=1_00 ,metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} ,)
A__ : Optional[int] = dataclasses.field(
default=A_ ,metadata={"help": "Random seed for initialization."} ,)
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ):
snake_case : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case : Optional[int] = dataset.filter(lambda __lowerCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case : int = int(eval_result * len(__lowerCamelCase ) )
print(__lowerCamelCase )
snake_case : List[str] = dataset.sort("probability" , reverse=__lowerCamelCase )
snake_case : Tuple = dataset.select(range(__lowerCamelCase ) )
snake_case : List[Any] = dataset.remove_columns(["label", "probability"] )
snake_case : Any = dataset.rename_column("prediction" , "label" )
snake_case : str = dataset.map(lambda __lowerCamelCase : {"label": idalabel[example["label"]]} )
snake_case : List[str] = dataset.shuffle(seed=args.seed )
snake_case : int = os.path.join(__lowerCamelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__lowerCamelCase , index=__lowerCamelCase )
else:
dataset.to_json(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , **__lowerCamelCase : List[Any] ):
snake_case : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case : Dict = STModelArguments(model_name_or_path=__lowerCamelCase )
snake_case : Tuple = STDataArguments(train_file=__lowerCamelCase , infer_file=__lowerCamelCase )
snake_case : str = STTrainingArguments(output_dir=__lowerCamelCase )
snake_case : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowerCamelCase ).items():
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for key, value in kwargs.items():
if hasattr(__lowerCamelCase , __lowerCamelCase ):
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Sanity checks
snake_case : List[str] = {}
snake_case : Optional[int] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case : str = args.train_file
snake_case : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case : Tuple = args.eval_file
for key in data_files:
snake_case : List[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
snake_case : Union[str, Any] = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case : List[Any] = f"""{args.output_dir}/self-train_iter-{{}}""".format
snake_case : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
accelerator.wait_for_everyone()
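    # State tracked across self-training rounds: the best iteration seen so far, its evaluation
    # result, the early-stopping patience counter, and a flag that ends the loop early.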
snake_case : Dict = None
snake_case : Union[str, Any] = None
snake_case : Tuple = 0
snake_case : List[Any] = False
# Show the progress bar
snake_case : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case : str = data_dir_format(__lowerCamelCase )
assert os.path.exists(__lowerCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case : Dict = os.path.join(__lowerCamelCase , "stage-1" )
snake_case : Optional[Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowerCamelCase , __lowerCamelCase ):
arguments_dict.update({key: value} )
snake_case : int = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowerCamelCase , __lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowerCamelCase )
finetune(**__lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowerCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case : str = os.path.join(__lowerCamelCase , "best-checkpoint" )
snake_case : Dict = os.path.join(__lowerCamelCase , "stage-2" )
# Update arguments_dict
snake_case : List[str] = model_path
snake_case : Optional[Any] = data_files["train"]
snake_case : Optional[Any] = current_output_dir
snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowerCamelCase , __lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowerCamelCase )
finetune(**__lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __lowerCamelCase )
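        # After fine-tuning, read the best checkpoint's evaluation score and its predictions on the
        # unlabeled data, then build the pseudo-labeled training set for the next iteration.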
snake_case : int = iteration
snake_case : Tuple = data_dir_format(iteration + 1 )
snake_case : Tuple = AutoConfig.from_pretrained(os.path.join(__lowerCamelCase , "best-checkpoint" ) )
snake_case : Optional[int] = config.idalabel
snake_case : List[Any] = os.path.join(__lowerCamelCase , "eval_results_best-checkpoint.json" )
snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "test_results_best-checkpoint.json" )
assert os.path.exists(__lowerCamelCase )
with open(__lowerCamelCase , "r" ) as f:
snake_case : Dict = float(json.load(__lowerCamelCase )[args.eval_metric] )
snake_case : Optional[int] = os.path.join(__lowerCamelCase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__lowerCamelCase )
# Loading the dataset from local csv or json files.
snake_case : Optional[Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case : Dict = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowerCamelCase ):
shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
accelerator.wait_for_everyone()
snake_case : str = os.path.join(__lowerCamelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case : List[Any] = eval_result
if best_iteration is None:
snake_case : List[Any] = new_iteration
snake_case : int = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case : int = new_iteration
snake_case : Union[str, Any] = new_eval_result
snake_case : str = 0
else:
if new_eval_result == best_eval_result:
snake_case : Any = new_iteration
snake_case : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case : Tuple = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __lowerCamelCase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , )
| 59
| 1
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_UpperCAmelCase = logging.getLogger(__name__)
class snake_case_ ( __lowercase ):
def UpperCAmelCase__ ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=None )->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.layer[current_layer](_snake_case , _snake_case , head_mask[current_layer] )
__lowerCAmelCase : List[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' ,__lowercase ,)
class snake_case_ ( __lowercase ):
def __init__( self : List[Any] , _snake_case : str )->Dict:
'''simple docstring'''
super().__init__(_snake_case )
__lowerCAmelCase : str = BertEncoderWithPabee(_snake_case )
self.init_weights()
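        # Early-exit bookkeeping: the patience value, the regression threshold, and the running
        # totals used to report the average number of layers executed at inference time.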
__lowerCAmelCase : str = 0
__lowerCAmelCase : Dict = 0
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : int = 0
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : str )->Dict:
'''simple docstring'''
__lowerCAmelCase : str = threshold
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : List[str] )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : int = patience
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : str = 0
def UpperCAmelCase__ ( self : List[str] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = self.inference_layers_num / self.inference_instances_num
__lowerCAmelCase : str = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(_snake_case )
@add_start_docstrings_to_model_forward(_snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : Tuple=None , _snake_case : Tuple=None , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : Dict=None , _snake_case : int=None , _snake_case : Optional[int]=None , _snake_case : Dict=None , _snake_case : Any=None , _snake_case : Tuple=False , )->int:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
__lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
__lowerCAmelCase : Optional[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
__lowerCAmelCase : Dict = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCAmelCase : Dict = torch.ones(_snake_case , device=_snake_case )
if token_type_ids is None:
__lowerCAmelCase : List[Any] = torch.zeros(_snake_case , dtype=torch.long , device=_snake_case )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(_snake_case , _snake_case , _snake_case )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = encoder_hidden_states.size()
__lowerCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__lowerCAmelCase : str = torch.ones(_snake_case , device=_snake_case )
__lowerCAmelCase : List[Any] = self.invert_attention_mask(_snake_case )
else:
__lowerCAmelCase : Tuple = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCAmelCase : Dict = self.get_head_mask(_snake_case , self.config.num_hidden_layers )
__lowerCAmelCase : List[Any] = self.embeddings(
input_ids=_snake_case , position_ids=_snake_case , token_type_ids=_snake_case , inputs_embeds=_snake_case )
__lowerCAmelCase : Any = embedding_output
if self.training:
__lowerCAmelCase : int = []
for i in range(self.config.num_hidden_layers ):
__lowerCAmelCase : List[Any] = self.encoder.adaptive_forward(
_snake_case , current_layer=_snake_case , attention_mask=_snake_case , head_mask=_snake_case )
__lowerCAmelCase : Optional[int] = self.pooler(_snake_case )
__lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(_snake_case ) )
res.append(_snake_case )
elif self.patience == 0: # Use all layers for inference
__lowerCAmelCase : Optional[int] = self.encoder(
_snake_case , attention_mask=_snake_case , head_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
__lowerCAmelCase : Dict = self.pooler(encoder_outputs[0] )
__lowerCAmelCase : Optional[Any] = [output_layers[self.config.num_hidden_layers - 1](_snake_case )]
else:
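            # Patience-based early exit: run the encoder one layer at a time; after each layer, pool
            # and classify, and compare the prediction with the previous layer's. Once the prediction
            # has stayed stable for `patience` consecutive layers (or within the regression threshold
            # for regression tasks), stop and return that layer's output.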
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Any = None
__lowerCAmelCase : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
_snake_case , current_layer=_snake_case , attention_mask=_snake_case , head_mask=_snake_case )
__lowerCAmelCase : List[Any] = self.pooler(_snake_case )
__lowerCAmelCase : Tuple = output_layers[i](_snake_case )
if regression:
__lowerCAmelCase : Union[str, Any] = logits.detach()
if patient_result is not None:
__lowerCAmelCase : Any = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__lowerCAmelCase : int = 0
else:
__lowerCAmelCase : Optional[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
__lowerCAmelCase : Optional[int] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_snake_case ) ):
patient_counter += 1
else:
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : int = logits
if patient_counter == self.patience:
break
__lowerCAmelCase : Any = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' ,__lowercase ,)
class snake_case_ ( __lowercase ):
def __init__( self : Dict , _snake_case : Tuple )->Union[str, Any]:
'''simple docstring'''
super().__init__(_snake_case )
__lowerCAmelCase : Any = config.num_labels
__lowerCAmelCase : Optional[Any] = BertModelWithPabee(_snake_case )
__lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
__lowerCAmelCase : Dict = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_snake_case )
def UpperCAmelCase__ ( self : Dict , _snake_case : int=None , _snake_case : List[str]=None , _snake_case : str=None , _snake_case : str=None , _snake_case : List[str]=None , _snake_case : Tuple=None , _snake_case : Dict=None , )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.bert(
input_ids=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , position_ids=_snake_case , head_mask=_snake_case , inputs_embeds=_snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__lowerCAmelCase : List[str] = (logits[-1],)
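        # When labels are given, every internal classifier contributes a loss (MSE for regression,
        # cross-entropy otherwise), weighted by its depth (layer index + 1); the weighted average
        # is prepended to the outputs.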
if labels is not None:
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : List[str] = 0
for ix, logits_item in enumerate(_snake_case ):
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase : Union[str, Any] = MSELoss()
__lowerCAmelCase : str = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__lowerCAmelCase : Dict = CrossEntropyLoss()
__lowerCAmelCase : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__lowerCAmelCase : Optional[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__lowerCAmelCase : List[str] = (total_loss / total_weights,) + outputs
return outputs
| 232
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
_UpperCAmelCase = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
_UpperCAmelCase = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] ) -> Optional[Any]:
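    # Load the emoji mapping from its JSON file, then read the vocabulary file, where each line
    # holds one or more comma-separated surface forms that all map to the same token id, and build
    # the token->id, id->token and raw vocabulary tables.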
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : int = json.loads(f.read() )
__lowerCAmelCase : Dict = collections.OrderedDict()
__lowerCAmelCase : str = collections.OrderedDict()
__lowerCAmelCase : Union[str, Any] = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : Tuple = f.readlines()
__lowerCAmelCase : Tuple = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = b
__lowerCAmelCase : Dict = idx
for wd in b:
__lowerCAmelCase : List[str] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case_ ( __lowercase ):
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any="<|endoftext|>" , _snake_case : str="<|endoftext|>" , _snake_case : str="<|startoftext|>" , _snake_case : List[Any]="<|endoftext|>" , _snake_case : str=False , **_snake_case : List[Any] , )->Union[str, Any]:
'''simple docstring'''
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
            raise ValueError(
                F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
                """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(_snake_case ):
raise ValueError(
                F'''Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__lowerCAmelCase : Any = do_clean_text
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = load_vocab_and_emoji(_snake_case , _snake_case )
__lowerCAmelCase : int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCAmelCase__ ( self : int )->str:
'''simple docstring'''
return len(self.raw_vocab )
def UpperCAmelCase__ ( self : Tuple )->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : Any , _snake_case : str )->Optional[int]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : Optional[Any] )->Any:
'''simple docstring'''
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : int , _snake_case : Any )->int:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : int )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = """""".join(_snake_case ).strip()
return out_string
def UpperCAmelCase__ ( self : List[str] , _snake_case : "Conversation" )->List[int]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
__lowerCAmelCase : List[str] = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = 0
if os.path.isdir(_snake_case ):
__lowerCAmelCase : Dict = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase : List[Any] = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__lowerCAmelCase : Union[str, Any] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__lowerCAmelCase : Dict = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__lowerCAmelCase : List[str] = token_index
writer.write(""",""".join(_snake_case ) + """\n""" )
index += 1
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class snake_case_ ( __lowercase ):
def __init__( self : Optional[Any] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[int] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = vocab # same as swe
__lowerCAmelCase : str = ids_to_tokens # same as bpe
__lowerCAmelCase : Dict = emoji
__lowerCAmelCase : int = np.max([len(_snake_case ) for w in self.vocab.keys()] )
__lowerCAmelCase : str = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__lowerCAmelCase : Optional[Any] = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__lowerCAmelCase : Tuple = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__lowerCAmelCase : Optional[Any] = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCAmelCase : Union[str, Any] = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCAmelCase : str = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__lowerCAmelCase : List[Any] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__lowerCAmelCase : Union[str, Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__lowerCAmelCase : str = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : int )->int:
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCAmelCase__ ( self : List[str] , _snake_case : Any )->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.content_repattera.sub("""<URL>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<EMAIL>""" , _snake_case )
__lowerCAmelCase : Optional[Any] = self.content_repattera.sub("""<TEL>""" , _snake_case )
__lowerCAmelCase : str = self.content_repattera.sub("""<DATE>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<DATE>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<PRICE>""" , _snake_case )
__lowerCAmelCase : List[Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__lowerCAmelCase : str = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def UpperCAmelCase__ ( self : str , _snake_case : List[Any] , _snake_case : Optional[int]=False )->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" )
__lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" )
__lowerCAmelCase : Union[str, Any] = text.replace("""\r\n""" , """<BR>""" )
__lowerCAmelCase : Tuple = text.replace("""\n""" , """<BR>""" )
__lowerCAmelCase : List[str] = text.replace("""\r""" , """<BR>""" )
__lowerCAmelCase : Dict = text.replace("""\t""" , """<TAB>""" )
__lowerCAmelCase : Dict = text.replace("""—""" , """ー""" )
__lowerCAmelCase : Tuple = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__lowerCAmelCase : Optional[Any] = text.replace(_snake_case , _snake_case )
if clean:
__lowerCAmelCase : List[Any] = self.clean_text(_snake_case )
def check_simbol(_snake_case : List[str] ):
__lowerCAmelCase : Optional[int] = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
__lowerCAmelCase : Optional[Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(_snake_case : Union[str, Any] ):
__lowerCAmelCase : Dict = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
__lowerCAmelCase : List[str] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
__lowerCAmelCase : Dict = 0
__lowerCAmelCase : Dict = []
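        # Greedy longest-match tokenization: at each position try every substring from the longest
        # allowed span down to a single character, collect vocabulary hits, and keep the hit with the
        # smallest token id; characters with no match fall back to <KIGOU>, <U2000U2BFF> or byte tokens.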
while pos < len(_snake_case ):
__lowerCAmelCase : str = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__lowerCAmelCase : Tuple = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
__lowerCAmelCase : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
__lowerCAmelCase : Tuple = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
result.append(_snake_case )
__lowerCAmelCase : int = e
else:
__lowerCAmelCase : Dict = pos + 1
__lowerCAmelCase : Dict = text[pos:end]
if check_simbol(_snake_case ):
result.append("""<KIGOU>""" )
elif checkuae(_snake_case ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__lowerCAmelCase : int = end
return result
def UpperCAmelCase__ ( self : List[str] , _snake_case : Optional[int] , _snake_case : List[Any]="\n" )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Optional[Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCAmelCase : Optional[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCAmelCase : Dict = """""".join(_snake_case )
return text
| 232
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = BlenderbotConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self : Tuple , A : str , A : Any=13 , A : Dict=7 , A : Optional[int]=True , A : List[Any]=False , A : Tuple=99 , A : Any=32 , A : Dict=2 , A : Optional[int]=4 , A : Union[str, Any]=37 , A : Dict=0.1 , A : List[str]=0.1 , A : Dict=20 , A : Union[str, Any]=2 , A : List[Any]=1 , A : str=0 , ):
__snake_case: Optional[Any] = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = seq_length
__snake_case: Any = is_training
__snake_case: Optional[Any] = use_labels
__snake_case: Dict = vocab_size
__snake_case: Optional[int] = hidden_size
__snake_case: Optional[Any] = num_hidden_layers
__snake_case: Any = num_attention_heads
__snake_case: Any = intermediate_size
__snake_case: Any = hidden_dropout_prob
__snake_case: Dict = attention_probs_dropout_prob
__snake_case: List[str] = max_position_embeddings
__snake_case: Tuple = eos_token_id
__snake_case: Optional[int] = pad_token_id
__snake_case: Optional[Any] = bos_token_id
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case: Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case: str = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case: List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__snake_case: int = prepare_blenderbot_inputs_dict(A , A , A )
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] , A : List[str] , A : str ):
__snake_case: Optional[int] = TFBlenderbotModel(config=A ).get_decoder()
__snake_case: Any = inputs_dict["""input_ids"""]
__snake_case: Tuple = input_ids[:1, :]
__snake_case: List[Any] = inputs_dict["""attention_mask"""][:1, :]
__snake_case: Optional[int] = inputs_dict["""head_mask"""]
__snake_case: Dict = 1
# first forward pass
__snake_case: List[str] = model(A , attention_mask=A , head_mask=A , use_cache=A )
__snake_case , __snake_case: List[str] = outputs.to_tuple()
        # create a hypothetical next token and extend it to next_input_ids
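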
__snake_case: str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case: int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__snake_case: Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
__snake_case: Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__snake_case: Union[str, Any] = model(A , attention_mask=A )[0]
__snake_case: List[Any] = model(A , attention_mask=A , past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__snake_case: Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__snake_case: str = output_from_no_past[:, -3:, random_slice_idx]
__snake_case: str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A , A , rtol=1E-3 )
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ) -> Tuple:
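    # Fill in sensible defaults for any mask that was not passed in: attend to every non-pad token,
    # always attend to the first decoder token, and keep every attention head.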
if attention_mask is None:
__snake_case: List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
__snake_case: Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
__snake_case: int = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
__snake_case: Any = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
__snake_case: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowerCAmelCase__ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = TFBlenderbotModelTester(self )
__snake_case: Dict = ConfigTester(self , config_class=A )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""My friends are cool but they eat too many carbs."""]
lowerCAmelCase__ = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[Any] = self.tokenizer(self.src_text , return_tensors="""tf""" )
__snake_case: str = self.model.generate(
model_inputs.input_ids , )
__snake_case: List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 111
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__UpperCAmelCase : Optional[int] = trt.Logger(trt.Logger.WARNING)
__UpperCAmelCase : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
__UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="The number of processes to use for the preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__UpperCAmelCase : Tuple = parser.parse_args()
if args.tokenizer_name:
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
__UpperCAmelCase : Optional[Any] = args.per_device_eval_batch_size
__UpperCAmelCase : Dict = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : str = "temp_engine/bert-fp32.engine"
if args.fpaa:
__UpperCAmelCase : Tuple = "temp_engine/bert-fp16.engine"
if args.inta:
__UpperCAmelCase : List[Any] = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__UpperCAmelCase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
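    # Parse the ONNX graph into the TensorRT network definition, printing any parser errors.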
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__UpperCAmelCase : int = [network.get_input(i) for i in range(network.num_inputs)]
__UpperCAmelCase : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__UpperCAmelCase : Optional[Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__UpperCAmelCase : Any = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__UpperCAmelCase : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
__snake_case: Tuple = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
__snake_case: Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
__snake_case: List[str] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE__)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE__)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE__)
# start time
__snake_case: int = time.time()
# Run inference
context.execute_async(
bindings=[int(SCREAMING_SNAKE_CASE__) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE__), int(SCREAMING_SNAKE_CASE__)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# Synchronize the stream and take time
stream.synchronize()
# end time
__snake_case: Optional[Any] = time.time()
__snake_case: Dict = end_time - start_time
__snake_case: Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__UpperCAmelCase : Union[str, Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__UpperCAmelCase : str = raw_datasets["validation"].column_names
__UpperCAmelCase : Dict = "question" if "question" in column_names else column_names[0]
__UpperCAmelCase : List[Any] = "context" if "context" in column_names else column_names[1]
__UpperCAmelCase : List[str] = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__UpperCAmelCase : List[str] = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__UpperCAmelCase : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[int]:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take up a lot of space). So we remove that
# left whitespace
__snake_case: Optional[int] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__snake_case: List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=SCREAMING_SNAKE_CASE__ , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__snake_case: Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__snake_case: int = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__snake_case: int = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__snake_case: Any = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__snake_case: Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__UpperCAmelCase : int = raw_datasets["validation"]
# Validation Feature Creation
__UpperCAmelCase : Dict = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__UpperCAmelCase : Dict = default_data_collator
__UpperCAmelCase : List[Any] = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__UpperCAmelCase : str = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="eval") -> Optional[int]:
# Post-processing: we match the start logits and end logits to answers in the original context.
__snake_case: Optional[Any] = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__snake_case: Tuple = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__snake_case: str = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__snake_case: Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__)
__UpperCAmelCase : List[str] = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
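    # Helper: number of bytes required for a given engine binding (shape volume * dtype size).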
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__)) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__).itemsize
# Allocate device memory for inputs and outputs.
__UpperCAmelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__UpperCAmelCase : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__UpperCAmelCase : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__UpperCAmelCase : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes)
__UpperCAmelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__UpperCAmelCase : Optional[int] = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
__UpperCAmelCase : Optional[Any] = 0.0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Any = timeit.default_timer()
__UpperCAmelCase : Union[str, Any] = None
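    # Main evaluation loop: run TensorRT inference batch by batch, turn the start/end logits into
    # tensors, pad and gather them across processes, and accumulate them into all_preds.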
for step, batch in enumerate(eval_dataloader):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__UpperCAmelCase , __UpperCAmelCase : str = outputs
__UpperCAmelCase : Any = torch.tensor(start_logits)
__UpperCAmelCase : Tuple = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__UpperCAmelCase : Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__UpperCAmelCase : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__UpperCAmelCase : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__UpperCAmelCase : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__UpperCAmelCase : Union[str, Any] = nested_truncate(all_preds, len(eval_dataset))
__UpperCAmelCase : List[str] = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000))
logger.info("Total Number of Inference = %d", niter)
__UpperCAmelCase : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__UpperCAmelCase : Optional[int] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
| 111
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : int = 'markuplm'
def __init__( self , SCREAMING_SNAKE_CASE__=30522 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=216 , SCREAMING_SNAKE_CASE__=1001 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=50 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = vocab_size
lowercase : Optional[int] = hidden_size
lowercase : str = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : List[Any] = hidden_act
lowercase : str = intermediate_size
lowercase : int = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : Any = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : int = initializer_range
lowercase : Any = layer_norm_eps
lowercase : int = position_embedding_type
lowercase : Any = use_cache
lowercase : int = classifier_dropout
# additional properties
lowercase : str = max_depth
lowercase : Tuple = max_xpath_tag_unit_embeddings
lowercase : Union[str, Any] = max_xpath_subs_unit_embeddings
lowercase : int = tag_pad_id
lowercase : Any = subs_pad_id
lowercase : int = xpath_unit_hidden_size
| 173
|
def solution(length: int = 50) -> int:
"""simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
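        # For each tile size (2, 3 and 4) and each possible start position of the first tile, add the
        # ways to fill the rest of the row after that tile (plus one for placing no further tile).
        # Summing the three per-size counts gives the answer; this matches Project Euler problem 116.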
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 173
| 1
|