code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class a__ ( A__ ):
UpperCAmelCase__ = '''gpt_bigcode'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Optional[Any] , _lowerCamelCase :Optional[int]=50_257 , _lowerCamelCase :Optional[Any]=1_024 , _lowerCamelCase :Optional[int]=768 , _lowerCamelCase :List[Any]=12 , _lowerCamelCase :Any=12 , _lowerCamelCase :str=None , _lowerCamelCase :Optional[Any]="gelu_pytorch_tanh" , _lowerCamelCase :Optional[int]=0.1 , _lowerCamelCase :Union[str, Any]=0.1 , _lowerCamelCase :List[str]=0.1 , _lowerCamelCase :List[Any]=1E-5 , _lowerCamelCase :List[str]=0.02 , _lowerCamelCase :List[str]=True , _lowerCamelCase :Any=True , _lowerCamelCase :Tuple=50_256 , _lowerCamelCase :Optional[Any]=50_256 , _lowerCamelCase :Optional[Any]=True , _lowerCamelCase :str=True , _lowerCamelCase :Tuple=True , **_lowerCamelCase :Optional[Any] , ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =vocab_size
UpperCamelCase_ : int =n_positions
UpperCamelCase_ : Optional[int] =n_embd
UpperCamelCase_ : Any =n_layer
UpperCamelCase_ : Any =n_head
UpperCamelCase_ : Dict =n_inner
UpperCamelCase_ : Dict =activation_function
UpperCamelCase_ : Optional[int] =resid_pdrop
UpperCamelCase_ : Union[str, Any] =embd_pdrop
UpperCamelCase_ : Optional[Any] =attn_pdrop
UpperCamelCase_ : Dict =layer_norm_epsilon
UpperCamelCase_ : Union[str, Any] =initializer_range
UpperCamelCase_ : List[Any] =scale_attn_weights
UpperCamelCase_ : Optional[Any] =use_cache
UpperCamelCase_ : str =attention_softmax_in_fpaa
UpperCamelCase_ : str =scale_attention_softmax_in_fpaa
UpperCamelCase_ : Tuple =multi_query
UpperCamelCase_ : Any =bos_token_id
UpperCamelCase_ : Dict =eos_token_id
super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
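# Minimal usage sketch for the config above, assuming a `transformers` release
# (>= 4.28) that ships GPT-BigCode; builds a small randomly initialized model,
# so no download is needed.
from transformers import GPTBigCodeConfig, GPTBigCodeModel

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
model = GPTBigCodeModel(config)
print(model.config.multi_query)  # True: keys/values share a single head (multi-query attention)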
| 357 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a__ ( A__ ):
UpperCAmelCase__ = ''''''
UpperCAmelCase__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self :Dict , _lowerCamelCase :Optional[DatasetInfo] = None , _lowerCamelCase :Optional[str] = None , **_lowerCamelCase :Tuple , ):
'''simple docstring'''
super().__init__(self , **_lowerCamelCase )
UpperCamelCase_ : List[str] =repo_info
UpperCamelCase_ : Any =token
UpperCamelCase_ : Tuple =None
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
if self.dir_cache is None:
UpperCamelCase_ : Any ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCamelCase_ : Optional[Any] ={
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowerCamelCase ): {'name': str(_lowerCamelCase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :str , _lowerCamelCase :str = "rb" , **_lowerCamelCase :str , ):
'''simple docstring'''
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
UpperCamelCase_ : List[Any] =hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :Tuple , **_lowerCamelCase :Any ):
'''simple docstring'''
self._get_dirs()
UpperCamelCase_ : Tuple =self._strip_protocol(_lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :List[str] , _lowerCamelCase :List[Any]=False , **_lowerCamelCase :Any ):
'''simple docstring'''
self._get_dirs()
UpperCamelCase_ : str =PurePosixPath(path.strip('/' ) )
UpperCamelCase_ : List[str] ={}
for p, f in self.dir_cache.items():
UpperCamelCase_ : List[Any] =PurePosixPath(p.strip('/' ) )
UpperCamelCase_ : Tuple =p.parent
if root == path:
UpperCamelCase_ : int =f
UpperCamelCase_ : Optional[int] =list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
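# The directory-cache expansion from _get_dirs() above, run standalone: every
# parent of each sibling file becomes a synthetic directory entry. The
# filenames here are made up for illustration.
from pathlib import PurePosixPath

siblings = ["data/train/part-000.parquet", "README.md"]
dir_cache = {}
for rfilename in siblings:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    dir_cache.update(
        {
            str(d): {"name": str(d), "size": None, "type": "directory"}
            for d in list(PurePosixPath(rfilename).parents)[:-1]
        }
    )
print(sorted(dir_cache))
# ['README.md', 'data', 'data/train', 'data/train/part-000.parquet']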
| 357 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 701 |
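# A self-contained sketch of the lazy-import pattern used above. This LazyModule
# is a simplified stand-in for transformers' _LazyModule, not its real
# implementation; the demo uses stdlib modules in place of the submodules.
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the module that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        mod_name = self._attr_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(mod_name), attr)
        setattr(self, attr, value)  # cache: subsequent lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(9))  # json/math only imported on first access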
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'scipy']
def __init__( self: Tuple , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: str) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch", "scipy"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"]) | 615 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowercase = '''\
Text data.
Second line of data.'''
_lowercase = '''file'''
@pytest.fixture(scope="session" )
def lowerCAmelCase__ ( __magic_name__ ) ->int:
__lowercase = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
__lowercase = bytes(__magic_name__ , "utf-8" )
with zstd.open(__magic_name__ , "wb" ) as f:
f.write(__magic_name__ )
return path
@pytest.fixture
def lowerCAmelCase__ ( __magic_name__ ) ->str:
with open(os.path.join(tmpfs.local_root_dir , __magic_name__ ) , "w" ) as f:
f.write(__magic_name__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ->List[str]:
__lowercase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
__lowercase = input_paths[compression_format]
__lowercase = tmp_path / "cache"
__lowercase = DownloadConfig(cache_dir=__magic_name__ , extract_compressed_file=__magic_name__ )
__lowercase = cached_path(__magic_name__ , download_config=__magic_name__ )
with open(__magic_name__ ) as f:
__lowercase = f.read()
with open(__magic_name__ ) as f:
__lowercase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ->int:
__lowercase = "custom_cache"
__lowercase = "custom_extracted_dir"
__lowercase = tmp_path / "custom_extracted_path"
if default_extracted:
__lowercase = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , __magic_name__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__magic_name__ ) )
__lowercase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__lowercase = xz_file
__lowercase = (
DownloadConfig(extract_compressed_file=__magic_name__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__magic_name__ )
)
__lowercase = cached_path(__magic_name__ , download_config=__magic_name__ )
assert Path(__magic_name__ ).parent.parts[-2:] == expected
def lowerCAmelCase__ ( __magic_name__ ) ->Any:
# absolute path
__lowercase = str(Path(__magic_name__ ).resolve() )
assert cached_path(__magic_name__ ) == text_file
# relative path
__lowercase = str(Path(__magic_name__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__magic_name__ ) == text_file
def lowerCAmelCase__ ( __magic_name__ ) ->Union[str, Any]:
# absolute path
__lowercase = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__magic_name__ ):
cached_path(__magic_name__ )
# relative path
__lowercase = "./__missing_file__.txt"
with pytest.raises(__magic_name__ ):
cached_path(__magic_name__ )
def lowerCAmelCase__ ( __magic_name__ ) ->List[Any]:
__lowercase = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(__magic_name__ ) as f:
__lowercase = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __magic_name__ )
def lowerCAmelCase__ ( ) ->Tuple:
with pytest.raises(__magic_name__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __magic_name__ )
def lowerCAmelCase__ ( __magic_name__ ) ->Dict:
__lowercase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__magic_name__ ):
http_get("https://huggingface.co" , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __magic_name__ )
def lowerCAmelCase__ ( __magic_name__ ) ->Any:
__lowercase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__magic_name__ ):
ftp_get("ftp://huggingface.co" , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __magic_name__ )
def lowerCAmelCase__ ( __magic_name__ ) ->Optional[Any]:
__lowercase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__magic_name__ ):
fsspec_get("s3://huggingface.co" , temp_file=__magic_name__ )
with pytest.raises(__magic_name__ ):
fsspec_head("s3://huggingface.co" )
| 118 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
def choose_first(__magic_name__ , __magic_name__=False ):
assert isinstance(__magic_name__ , __magic_name__ )
if len(__magic_name__ ) == 1:
__lowercase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__lowercase = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
__lowercase = {"id": example["id"]}
__lowercase = example["annotations"]
__lowercase = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
__lowercase = ["yes"] if 1 in yes_no_answer else ["no"]
__lowercase = __lowercase = []
__lowercase = __lowercase = []
__lowercase = ["<cls>"]
else:
__lowercase = ["short"]
__lowercase = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
__lowercase = ["long"]
__lowercase = choose_first(annotation["long_answer"] , is_long_answer=__magic_name__ )
__lowercase = []
answer.update(__magic_name__ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
__lowercase = True
else:
__lowercase = False
__lowercase = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , __magic_name__ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def get_context_and_ans(example, assertion=False):
__lowercase = _get_single_answer(__magic_name__ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase = example["document"]["tokens"]
__lowercase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(__magic_name__ ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__lowercase = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__lowercase = example["document"]["tokens"]
__lowercase = answer["start_token"]
__lowercase = answer["end_token"]
__lowercase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__lowercase = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
__lowercase = doc["is_html"][answer["start_token"] : answer["end_token"]]
__lowercase = doc["token"][answer["start_token"] : answer["end_token"]]
__lowercase = " ".join([old[i] for i in range(len(__magic_name__ ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , __magic_name__ , end="\n" )
print("Old:" , __magic_name__ , end="\n\n" )
return {
"context": " ".join(__magic_name__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
# overlap will be of doc_stride - q_len
__lowercase = get_context_and_ans(__magic_name__ , assertion=__magic_name__ )
__lowercase = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
__lowercase = tokenizer(example["question"]["text"] , out["context"] ).input_ids
__lowercase = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__lowercase = []
__lowercase = []
__lowercase = input_ids[:q_len]
__lowercase = range(__magic_name__ , len(__magic_name__ ) , max_length - doc_stride )
for i in doc_start_indices:
__lowercase = i + max_length - q_len
__lowercase = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(__magic_name__ ),
"end_token": [-1_0_0] * len(__magic_name__ ),
"category": category,
},
}
__lowercase = out["context"].split()
__lowercase = splitted_context[answer["end_token"]]
__lowercase = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=__magic_name__ , ).input_ids )
__lowercase = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=__magic_name__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
__lowercase = len(tokenizer(__magic_name__ , add_special_tokens=__magic_name__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
__lowercase = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
__lowercase = answer["start_token"]
__lowercase = answer["end_token"]
if assertion:
__lowercase = tokenizer.decode(__magic_name__ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , __magic_name__ , end="\n\n" )
if len(__magic_name__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
__lowercase = input_ids[:q_len]
__lowercase = range(__magic_name__ , len(__magic_name__ ) , max_length - doc_stride )
__lowercase = []
__lowercase = []
__lowercase = []
__lowercase = [] # null, yes, no, long, short
for i in doc_start_indices:
__lowercase = i + max_length - q_len
__lowercase = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
__lowercase = start_token - i + q_len
__lowercase = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
__lowercase = -1_0_0
__lowercase = -1_0_0
answers_category.append("null" )
__lowercase = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__magic_name__ )
answers_end_token.append(__magic_name__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(__magic_name__ ) )
print("Old:" , tokenizer.decode(__magic_name__ ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_lowercase = load_dataset('''natural_questions''')
_lowercase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_lowercase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_lowercase = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_lowercase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_lowercase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_lowercase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
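# The striding scheme used above, in isolation: a long token sequence is cut
# into overlapping windows of (max_length - q_len) document tokens, each
# prefixed with the q_len question tokens. The values below are toy numbers,
# not the script's DOC_STRIDE/MAX_LENGTH.
q_len, max_length, doc_stride = 4, 10, 8
input_ids = list(range(20))  # tokens 0..3 are the "question", 4..19 the "document"
q_indices = input_ids[:q_len]
for i in range(q_len, len(input_ids), max_length - doc_stride):
    window = q_indices + input_ids[i : i + max_length - q_len]
    print(window)  # consecutive windows overlap by doc_stride - q_len tokens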
| 118 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Tuple = 'fnet'
def __init__( self : Any , A_ : Any=3_20_00 , A_ : Tuple=7_68 , A_ : str=12 , A_ : Optional[int]=30_72 , A_ : Optional[int]="gelu_new" , A_ : Union[str, Any]=0.1 , A_ : Optional[Any]=5_12 , A_ : Optional[Any]=4 , A_ : List[Any]=0.02 , A_ : Dict=1e-1_2 , A_ : Union[str, Any]=False , A_ : Optional[Any]=5_12 , A_ : str=3 , A_ : Optional[int]=1 , A_ : Optional[Any]=2 , **A_ : int , )-> Tuple:
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = type_vocab_size
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = use_tpu_fourier_optimizations
__UpperCamelCase = tpu_short_seq_length | 228 |
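# Minimal usage sketch for the config above, assuming a `transformers` release
# that ships FNet; the model is randomly initialized, so nothing is downloaded.
from transformers import FNetConfig, FNetModel

config = FNetConfig(num_hidden_layers=2, hidden_size=256)
model = FNetModel(config)
print(model.config.hidden_act)  # gelu_new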
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 228 | 1 |
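# Sketch of how the ONNX export config above is typically consulted, assuming
# a recent `transformers` release that ships Conditional DETR.
from transformers import ConditionalDetrConfig
from transformers.models.conditional_detr.configuration_conditional_detr import ConditionalDetrOnnxConfig

config = ConditionalDetrConfig()
onnx_config = ConditionalDetrOnnxConfig(config)
print(onnx_config.inputs)               # dynamic-axis spec for pixel_values / pixel_mask
print(onnx_config.atol_for_validation)  # 1e-05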
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
SCREAMING_SNAKE_CASE_ = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = None
A_ = None
A_ = None
A_ = None
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase , UpperCamelCase , UpperCamelCase = _str_to_version_tuple(self.version_str)
def __repr__( self) -> List[Any]:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.major, self.minor, self.patch
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
return Version(lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
return other
raise TypeError(F'{other} (type {type(lowerCamelCase_)}) cannot be compared to version.')
def __eq__( self , lowerCamelCase_) -> Optional[int]:
try:
UpperCamelCase = self._validate_operand(lowerCamelCase_)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , lowerCamelCase_) -> Tuple:
UpperCamelCase = self._validate_operand(lowerCamelCase_)
return self.tuple < other.tuple
def __hash__( self) -> str:
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def UpperCAmelCase__ ( self) -> str:
return self.version_str
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = _VERSION_REG.match(_lowercase )
if not res:
raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(_lowercase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def __snake_case ( _lowercase ):
"""simple docstring"""
return ".".join(str(_lowercase ) for v in version_tuple ) | 34 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization against the vocabulary."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
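# Quick demonstration of the greedy longest-match above with a toy vocabulary
# (real CPM-Ant vocab entries differ): characters with no match fall back to <unk>.
_toy = WordpieceTokenizer(vocab={"今天", "天气", "真", "好"}, unk_token="<unk>")
print(_toy.tokenize("今天天气真好!"))  # ['今天', '天气', '真', '好', '<unk>']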
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-tokenization followed by greedy wordpiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_))
| 34 | 1 |
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree constructor; works with any commutative combiner function."""
        any_type: Any | T = None

        self.N: int = len(arr)
        # one-based heap layout: internal nodes in st[1:N], leaves in st[N:2N]
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time by walking up to the root."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get the range query value over [l, r] in O(log N) time."""
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every possible segment against a reduce() reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
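    # Quick standalone check of the iterative scheme above: with [1, 10, -2, 9],
    # min over indices 1..2 is -2; after a point update it becomes 10.
    st = SegmentTree([1, 10, -2, 9], min)
    assert st.query(1, 2) == -2
    st.update(2, 100)  # the point update propagates up to the root
    assert st.query(1, 2) == 10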
| 457 |
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 457 | 1 |
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
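# map_nested in isolation (single process, no spark), assuming `datasets` is
# installed: it applies the function through arbitrarily nested lists/dicts,
# which is exactly what the parametrized test above asserts per backend.
from datasets.utils.py_utils import map_nested

print(map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}))  # {'a': [2, 3], 'b': 4}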
| 384 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at x directly, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at x with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
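    # Worked check: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0,
    #   5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800,
    # and Horner computes the same value as ((((7)*10 + 9.3)*10 + 5)*10 + 0)*10 + 0.
    import math

    assert math.isclose(evaluate_poly(poly, x), 79800.0)
    assert math.isclose(horner(poly, x), 79800.0)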
| 384 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the scheduler's step function: the new sample and its mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler for predictor-corrector sampling."""

    order = 1

    @register_to_config
def __init__( self , lowerCamelCase = 2000 , lowerCamelCase = 0.15 , lowerCamelCase = 0.01 , lowerCamelCase = 13_48.0 , lowerCamelCase = 1E-5 , lowerCamelCase = 1 , ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = sigma_max
# setable values
lowercase__ : int = None
self.set_sigmas(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def __a ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ : Union[str, Any] = torch.linspace(1 , lowerCamelCase , lowerCamelCase , device=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None ) -> str:
"""simple docstring"""
lowercase__ : Tuple = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase , lowerCamelCase )
lowercase__ : Optional[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ : List[str] = torch.exp(torch.linspace(math.log(lowerCamelCase ) , math.log(lowerCamelCase ) , lowerCamelCase ) )
lowercase__ : List[str] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def __a ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
lowercase__ : Any = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ : Tuple = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ : List[str] = timesteps.to(self.discrete_sigmas.device )
lowercase__ : Dict = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ : Union[str, Any] = self.get_adjacent_sigma(lowerCamelCase , lowerCamelCase ).to(sample.device )
lowercase__ : List[Any] = torch.zeros_like(lowerCamelCase )
lowercase__ : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ : List[str] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ : Dict = diffusion.unsqueeze(-1 )
lowercase__ : List[str] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ : str = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCamelCase , device=sample.device , dtype=sample.dtype )
lowercase__ : List[str] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase , prev_sample_mean=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ : Dict = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ : List[Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowercase__ : str = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowercase__ : List[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ : str = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ : Union[str, Any] = step_size.unsqueeze(-1 )
lowercase__ : List[Any] = sample + step_size * model_output
lowercase__ : List[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> torch.FloatTensor:
"""simple docstring"""
lowercase__ : str = timesteps.to(original_samples.device )
lowercase__ : Optional[Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ : Optional[int] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ : Tuple = noise + original_samples
return noisy_samples
def __len__( self ) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
| 298 |
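# Sketch of the predictor-corrector sampling loop the scheduler above drives,
# assuming the `diffusers` package is installed; `model` below is a stand-in
# score network, not a trained one, so the output is meaningless noise.
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

model = lambda x, t: -x  # stand-in for score_fn(sample, timestep)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(model(sample, t), sample).prev_sample  # corrector
    out = scheduler.step_pred(model(sample, t), t, sample)                     # predictor
    sample = out.prev_sample
print(sample.shape)  # torch.Size([1, 3, 32, 32])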
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 298 | 1 |
"""MaskFormer model configuration."""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
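# Minimal composition sketch for the config above, assuming a `transformers`
# release that ships MaskFormer: omitting decoder_config falls back to a
# default DetrConfig.
from transformers import MaskFormerConfig, SwinConfig

config = MaskFormerConfig(backbone_config=SwinConfig(image_size=384, embed_dim=128))
print(config.backbone_config.model_type, config.decoder_config.model_type)  # swin detr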
| 51 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_data_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
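# Usage sketch (hedged): driving the mock manager the way dataset tests do.
# The dataset name and version below are illustrative, not taken from this file.
def _example_mock_download_manager():
    dl_manager = MockDownloadManager("squad", None, "1.0.0", use_local_dummy_data=True)
    # a remote URL is rewritten to a path inside the local dummy_data.zip
    return dl_manager.download_and_extract("https://example.com/train.json")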
| 84 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
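# Illustration (hedged, not part of the original script): how MAPPING rewrites
# a fairseq parameter name. "encoder.layers.3.fc1.weight" matches the "fc1"
# key, the "*" in "encoder.layers.*.feed_forward.intermediate_dense" is filled
# with the layer index ("3"), and the trailing "weight" selects which tensor
# attribute set_recursively() assigns on the transformers module.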
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
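# Example invocation (hedged; the script file name and all paths are
# hypothetical, the flags are the ones defined below):
# python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/sew.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./sew-ctc-hf \
#     --is_finetuned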
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 720 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 61 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
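# Sanity sketch (hedged helper, not part of the original module): ten
# cosine-schedule betas, each capped at max_beta.
def _example_betas_for_alpha_bar():
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert float(betas.max()) <= 0.999
    return betas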
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Heun's second-order method for the discrete diffusion ODE (k-diffusion style).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
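# Usage sketch (hedged): a bare denoising loop with this scheduler. `unet` is
# a placeholder callable; in practice the model comes from a diffusers pipeline.
def _example_heun_loop(unet, sample):
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=25, device=sample.device)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample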
| 261 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
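# Hedged note: the checks above can be run directly with pytest, e.g.
# `python -m pytest -q tests/models/flaubert/test_modeling_flaubert.py`
# (the exact path depends on the transformers checkout).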
| 312 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCAmelCase : List[Any] =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCAmelCase : Dict =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[str] =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 718 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
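# Usage sketch (hedged helper): with both arguments None, the alignment helper
# defaults to the last stage.
def _example_alignment():
    out_features, out_indices = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
    assert out_features == ["stage2"] and out_indices == [2]
    return out_features, out_indices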
| 15 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
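

# Hypothetical quick check of ResizeShortestEdge above (not in the original
# file): a 480x640 uint8 image with short_edge_length [600, 600] and max_size
# 1000 should come back as 600x800, since 600/480 = 1.25 scales the long side
# to 800 without hitting the cap.
def _resize_demo():
    aug = ResizeShortestEdge([600, 600], max_size=1000)
    dummy = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
    return aug([dummy])[0].shape  # (600, 800, 3)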
| 123 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from the provided command line arguments.
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
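

# Hypothetical wiring sketch (not part of the original module): how this
# subcommand plugs into a root parser, the way `transformers-cli` would.
# The train.csv path is made up; actually running requires TF or PyTorch
# plus a real tab-separated dataset.
def _train_cli_demo():
    root_parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli train [<args>]")
    subcommands = root_parser.add_subparsers(help="transformers-cli command helpers")
    TrainCommand.register_subcommand(subcommands)
    args = root_parser.parse_args(["train", "--train_data", "train.csv", "--output", "./trained"])
    return args.func(args)  # -> TrainCommand via train_command_factory; .run() starts training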
| 673 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
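

# A small round-trip sketch for the helpers above (purely illustrative data,
# not part of the original module): split the YAML block out of a README-style
# string, parse it with the duplicate-safe loader, and dump it back.
def _metadata_roundtrip_demo() -> str:
    example_readme = "---\npretty_name: Demo\nlanguage:\n- en\n---\n# Demo dataset\n"
    yaml_block, _body = _split_yaml_from_readme(example_readme)
    metadata = DatasetMetadata.from_yaml_string(yaml_block)
    assert metadata["pretty_name"] == "Demo"
    return metadata.to_yaml_string()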
if __name__ == "__main__":
from argparse import ArgumentParser
__A : str = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__A : Union[str, Any] = ap.parse_args()
__A : Dict = Path(args.readme_filepath)
__A : Tuple = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 95 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(_SCREAMING_SNAKE_CASE ) , b_binary.zfill(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
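
    # Worked example: 25 = 0b11001 and 19 = 0b10011; only the bits set in both
    # survive, giving 0b10001 (decimal 17), which matches the `&` operator.
    print(binary_and(25, 19))  # 0b10001
    assert int(binary_and(25, 19), 2) == (25 & 19)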
| 95 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCamelCase = load_file(unet_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
UpperCamelCase = load_file(vae_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
UpperCamelCase = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
UpperCamelCase = load_file(text_enc_path, device='cpu')
else:
UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
UpperCamelCase = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
UpperCamelCase = convert_unet_state_dict(unet_state_dict)
UpperCamelCase = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase = convert_vae_state_dict(vae_state_dict)
UpperCamelCase = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCamelCase = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase = {'transformer.' + k: v for k, v in text_enc_dict.items()}
UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
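
    # Hypothetical sanity check (not in the original script): one HF Diffusers
    # UNet key renamed into the stable-diffusion scheme via the tables above.
    demo_key = "down_blocks.0.resnets.0.norm1.weight"
    for sd_part, hf_part in unet_conversion_map_resnet + unet_conversion_map_layer:
        demo_key = demo_key.replace(hf_part, sd_part)
    print(demo_key)  # input_blocks.1.0.in_layers.0.weight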
| 269 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: the molar mass must be in kg/mol, so nitrogen (28 g/mol) is 0.028
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 269 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
__lowerCAmelCase : Optional[List[bool]]
__lowerCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker | 396 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , a : Tuple , a : Union[str, Any] , a : Tuple , a : str=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Tuple , a : List[Any] , a : Optional[Any]=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet in self.resnets:
lowercase = resnet(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , a : Optional[int] , a : Optional[int] , a : Optional[int] , a : List[str] , a : Dict=True ) -> List[Any]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Any , a : Any , a : Tuple , a : Dict=True ) -> Optional[Any]:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
# there is always at least one resnet
lowercase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase = []
for _ in range(self.num_layers ):
lowercase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
lowercase = attentions
def __call__( self : List[Any] , a : Optional[int] , a : Tuple , a : List[Any] , a : List[str]=True ) -> Optional[Any]:
"""simple docstring"""
lowercase = self.resnets[0](a , a )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase = attn(a , a , deterministic=a )
lowercase = resnet(a , a , deterministic=a )
return hidden_states | 396 | 1 |
def solution(limit: int = 1_000_000) -> int:
    # Sieve out the odd composites, then add 2 to get the set of primes <= limit.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] = n * prod(1 - 1/p) over the prime divisors p of n (Euler's totient).
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
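

# The sum of phi(n) for 2 <= n <= limit counts the reduced proper fractions n/d
# with denominator d <= limit (Project Euler 72). A brute-force gcd count
# cross-checks the sieve above on a small limit.
def _brute_force_count(limit: int) -> int:
    from math import gcd

    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert solution(8) == _brute_force_count(8) == 21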
if __name__ == "__main__":
print(f"""{solution() = }""")
| 252 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
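

# The in-place update above is the usual LoRA merge W <- W + alpha * (up @ down).
# A tiny hypothetical tensor demo of the 2D branch, independent of any real
# pipeline: rank-2 factors expand to a full 4x4 delta added into the weight.
def _lora_merge_demo(alpha: float = 0.75) -> torch.Tensor:
    weight = torch.zeros(4, 4)
    weight_up = torch.randn(4, 2)  # LoRA "up" factor
    weight_down = torch.randn(2, 4)  # LoRA "down" factor
    weight += alpha * torch.mm(weight_up, weight_down)
    return weight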
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 | 0 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed-priority FIFO queues; lower priority number dequeues first."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element's own value is its priority: the smallest value dequeues first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)

    print(fpq)

    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())

    print(fpq)

    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)

    print(epq)

    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())

    print(epq)

    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 713 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)
    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 584 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
    @slow
    def test_tokenizer_integration(self):
# fmt: off
__lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # `__lowercase` above holds the expected encoding defined in the fmt: off block
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
| 41 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2_048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resulting grid of patches fits within max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
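

# Added usage sketch (not part of the original file): driving the processor
# defined above on a dummy image. `max_patches=512` is an illustrative choice;
# the assert follows from 16x16 patches on a 256x256 input (484 patches, padded
# to 512 rows). Requires Pillow and torch at runtime.
def _demo_pix2struct_processor():
    dummy = Image.new("RGB", (256, 256), "white")
    processor = Pix2StructImageProcessor(max_patches=512)
    encoded = processor(images=dummy, return_tensors="np")
    assert encoded["flattened_patches"].shape[1] == 512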
| 41 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "bert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
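

# Added usage sketch (not part of the original file): instantiating the config
# with its BERT-base defaults and inspecting the ONNX input axes defined above.
def _demo_bert_config():
    config = BertConfig()
    assert config.hidden_size == 768 and config.num_attention_heads == 12
    onnx_config = BertOnnxConfig(config)
    assert "input_ids" in onnx_config.inputs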
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
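

# Added example (not part of the original script): the fused-qkv split used in
# convert_state_dict above, shown on a toy tensor. A (3 * dim, dim) projection
# is cut into equal query / key / value blocks along dim 0.
def _demo_qkv_split(dim: int = 4):
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)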
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
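# Added usage note (not part of the original script): a typical invocation. The
# script filename is illustrative; the checkpoint URL is the parser default.
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64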
| 4 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
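# Added usage note (not part of the original script): example invocation. The
# script filename is illustrative; the repo id comes from the --checkpoint-repo
# help text above.
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm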
| 637 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    '''simple docstring'''

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    '''simple docstring'''

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        '''simple docstring'''
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        '''simple docstring'''
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 329 | 0 |
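
# Added example (not part of the original file): the masked mean-pooling used in
# the MultilingualCLIP forward pass above, isolated on toy tensors. Positions
# where the attention mask is 0 must not contribute to the pooled embedding.
import torch


def _demo_masked_mean_pool():
    embs = torch.ones(1, 3, 2)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1.0, 1.0, 0.0]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    assert torch.allclose(pooled, torch.ones(1, 2))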
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    '''simple docstring'''
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
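

# Added sketch (not part of the original file): `get_duration` is imported from
# the local benchmark utils above. A minimal stand-in with the same observable
# behavior (the wrapped call returns elapsed seconds) could look like this; the
# real helper in `utils` may differ.
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper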
| 708 |
"""simple docstring"""
def apply_table(inp, table):
    '''simple docstring'''
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    '''simple docstring'''
    return data[1:] + data[0]


def xor(a, b):
    '''simple docstring'''
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    '''simple docstring'''
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    '''simple docstring'''
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 349 | 0 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
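
    # Added quick check (not part of the original script): circle sort against
    # Python's built-in sorted() on a few edge cases.
    for data in ([0, 5, 3, 2, 2], [], [-2, -5, -45]):
        assert circle_sort(list(data)) == sorted(data)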
| 342 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self) -> Any:
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> Tuple:
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self) -> List[Any]:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self) -> Tuple:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self) -> List[Any]:
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self) -> int:
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=2_56,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 342 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Insert/overwrite if the bucket is empty or holds the same key;
        # return False on a collision so the caller probes the next bucket.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ' ,'.join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 717 |
'''simple docstring'''
def solution(n: int = 600_851_475_143) -> int:
    # Return the largest prime factor of n by repeated trial division.
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')

    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F"{solution() = }") | 195 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
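

# Added usage sketch (not part of the original file): driving the pipeline above
# end to end. The checkpoint id is the published LDM super-resolution model and
# is illustrative; inference needs a working diffusers install.
def _demo_ldm_super_resolution():
    from diffusers import LDMSuperResolutionPipeline

    pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = PIL.Image.new("RGB", (128, 128), "gray")
    upscaled = pipeline(image=low_res, num_inference_steps=10).images[0]
    upscaled.save("upscaled.png")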
| 343 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.'
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
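

# Added usage sketch (not part of the original file): initializing the Flax UNet
# above with random parameters. Assumes the public diffusers Flax API; the small
# shapes are illustrative and keep the init cheap.
def _demo_init_flax_unet():
    model = FlaxUNet2DConditionModel(
        sample_size=32, block_out_channels=(32, 64, 64, 64), cross_attention_dim=64
    )
    params = model.init_weights(jax.random.PRNGKey(0))
    return model, params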
| 343 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.'
    )
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.', )
    parser.add_argument(
        '--num_beams', type=int, default=None, help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ), )
    parser.add_argument(
        '--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True, )
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name', )
    parser.add_argument(
        '--device', type=str, default='cpu', help='Device where the model will be run', )
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors='pt').to(model.device)

        summary_ids = model.generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'], output_names=['output_ids'], dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            }, example_outputs=summary_ids, )

        logger.info('Model exported to {}'.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)

    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
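# A minimal sketch of querying the exported graph directly with ONNX Runtime,
# independent of the validation above. The file name "BART.onnx" and the
# decoder_start_token_id value (2 for facebook/bart-base) are assumptions,
# not guarantees from the original script.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

sess = onnxruntime.InferenceSession("BART.onnx")
tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = tok(["My friends are cool but they eat too many carbs."], return_tensors="np")
out = sess.run(
    None,
    {
        "input_ids": enc["input_ids"],
        "attention_mask": enc["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),  # assumed BART default
    },
)
print(tok.batch_decode(out[0], skip_special_tokens=True))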
| 136 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
# fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        texts = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
        expected_encoding = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=texts
        )
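# Quick illustration of the SentencePiece fixture used above; the expected
# pieces come straight from test_full_tokenizer, so no network access is needed.
demo_tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
print(demo_tokenizer.tokenize("This is a test"))  # ['▁This', '▁is', '▁a', '▁t', 'est']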
| 136 | 1 |
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1]."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
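# Illustrative call: with n=4 there are 24 permutations of [0, 1, 2, 3] and
# k=10 selects the one at index 10 in lexicographic order.
print(kth_permutation(10, 4))  # [1, 3, 0, 2]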
| 611 |
"""simple docstring"""
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Return the given decimal as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 610 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(case, tmp_path):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
| 565 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
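# Illustrative invocation (the paths below are placeholders, not files shipped
# with the repository):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir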
| 565 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
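# The lazy-module pattern above defers heavy framework imports until attribute
# access. A minimal sketch of the idea, independent of transformers' _LazyModule
# (names here are illustrative only):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when a symbol is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)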
| 13 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a list of `InputExample`s into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(UpperCamelCase__ ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(UpperCamelCase__ ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(UpperCamelCase__ ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(UpperCamelCase__ ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(UpperCamelCase__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self : List[str] ) -> Any:
"""simple docstring"""
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__ = tf.data.Dataset.from_generator(
UpperCamelCase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__magic_name__ = tf.data.Dataset.from_generator(
UpperCamelCase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
return self.dataset
def __len__( self : Tuple ) -> Tuple:
"""simple docstring"""
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
"""simple docstring"""
return self.features[i]
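# A toy end-to-end sketch of convert_examples_to_features, assuming a BERT-style
# tokenizer is available locally; the label set and the example are made up and
# are not part of the original module.
from transformers import AutoTokenizer

demo_tok = AutoTokenizer.from_pretrained("bert-base-cased")
demo_examples = [InputExample(guid="demo-1", words=["Hugging", "Face"], labels=["B-ORG", "I-ORG"])]
demo_feats = TokenClassificationTask.convert_examples_to_features(
    demo_examples, ["O", "B-ORG", "I-ORG"], 16, demo_tok, cls_token=demo_tok.cls_token, sep_token=demo_tok.sep_token
)
print(demo_feats[0].input_ids)  # padded to max_seq_length=16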
| 529 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
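# Quick sanity check of the config class above; the sizes are illustrative, not
# the checkpoint defaults.
demo_config = XLNetConfig(vocab_size=1000, d_model=64, n_layer=2, n_head=4, d_inner=256)
print(demo_config.hidden_size, demo_config.num_attention_heads)  # 64 4 via attribute_map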
| 633 |
import argparse
from collections import defaultdict
import yaml
lowercase__ :Optional[int] = "docs/source/en/_toctree.yml"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = defaultdict(lowerCAmelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase = [key for key, value in counts.items() if value > 1]
lowercase = []
for duplicate_key in duplicates:
lowercase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : s["title"].lower() )
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
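# Behavior sketch for clean_model_doc_toc: duplicate "local" entries collapse to
# one, and the result is sorted by title. Toy data, not a real _toctree.yml excerpt.
toy_toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
print(clean_model_doc_toc(toy_toc))  # ALBERT first, single BERT entry second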
| 633 | 1 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
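# Smoke-test of a few helpers above (illustrative values only):
if __name__ == "__main__":
    print(is_prime(97))             # True
    print(prime_factorization(84))  # [2, 2, 3, 7]
    print(gcd(84, 35))              # 7
    print(kg_v(4, 6))               # 12
    print(goldbach(28))             # [5, 23]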
| 502 |
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n (Sieve of Eratosthenes)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
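# Small-input check: the primes below 10 are 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17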
| 129 | 0 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
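# Standalone sketch of the round trip exercised above, bypassing the pytest
# fixtures; the file name "demo.sqlite" and the inserted row are made up, and
# this assumes sqlalchemy is installed for Dataset.from_sql.
import sqlite3 as _sqlite3

_con = _sqlite3.connect("demo.sqlite")
_con.execute("CREATE TABLE IF NOT EXISTS dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
_con.execute("INSERT INTO dataset VALUES ('0', 0, 0.0)")
_con.commit()
_con.close()

demo_ds = Dataset.from_sql("dataset", "sqlite:///demo.sqlite")
print(demo_ds[0])  # {'col_1': '0', 'col_2': 0, 'col_3': 0.0}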
| 392 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 392 | 1 |
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = int(a__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(a__ )
_UpperCamelCase = divmod(a__ , 2 )
return binary_recursive(a__ ) + str(a__ )
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = str(a__ ).strip()
if not number:
raise ValueError("No input value was provided" )
_UpperCamelCase = '''-''' if number.startswith("-" ) else ''''''
_UpperCamelCase = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(a__ ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
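# Example call (assumes the two functions above): main formats the sign and
# the "0b" prefix around the recursive conversion.
print(main("-24"))  # -0b11000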
def perfect(number: int) -> bool:
    """Check whether a number equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__lowercase = int(input('''Enter number: ''').strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 167 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
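# Round-trip sketch: a random tensor in [-1, 1] becomes a list of PIL images.
# torch is assumed to be available; the module above does not import it itself.
import torch

demo_batch = torch.rand(2, 3, 8, 8) * 2 - 1
demo_images = pt_to_pil(demo_batch)
print(len(demo_images), demo_images[0].size)  # 2 (8, 8)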
| 704 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
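    # Added note (hedged, not part of the original test): the expected slice above can
    # be regenerated offline along these lines; the checkpoint id is real, the snippet
    # itself is illustrative.
    #
    #   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
    #   with torch.no_grad():
    #       print(model(input_ids)[0][:, :3, :3])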
| 465 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
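    # Added note (hedged): "platform" switches JAX to the platform allocator so GPU memory
    # is allocated and freed on demand; setting XLA_PYTHON_CLIENT_PREALLOCATE="false" is
    # another commonly used knob for the same OOM problem.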
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
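    # Added note (hedged): both checks above assert that incremental decoding through
    # init_cache/past_key_values matches a single full-sequence decode numerically
    # (to 1e-3); only the first five logits of the last position are compared.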
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
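# Added usage sketch (hedged, shapes illustrative only):
#
#   cfg = PegasusConfig(vocab_size=99, pad_token_id=0)
#   ids = np.array([[5, 6, 7, 2]])
#   batch = prepare_pegasus_inputs_dict(cfg, ids, ids)
#   # batch["attention_mask"] zeroes out pad tokens; the decoder mask always keeps position 0.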
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
assert tgt_text == decoded
| 385 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
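# Added example (hedged): with PATTERNS applied in order, an illustrative TF key such as
# "decoder/ffn/dense_1/kernel" becomes "decoder.ffn.dense_1.kernel" ("/" -> "."), then
# "decoder.fc2.kernel" ("ffn.dense_1." -> "fc2."), then "decoder.fc2.weight"
# ("kernel" -> "weight"). Actual checkpoint keys may differ.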
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
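# Added note: the `v = v.T` above is needed because TF stores dense/projection kernels as
# (in_features, out_features) while torch.nn.Linear weights are (out_features, in_features).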
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
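# Added usage sketch (hedged, paths illustrative):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc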
| 158 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
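        # Added worked example: with the defaults above (num_mel_bins=16, max_length=24,
        # patch_size=2, both strides 2): frequency_out = (16-2)//2+1 = 8,
        # time_out = (24-2)//2+1 = 12, num_patches = 8*12 = 96, so seq_length = 98.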
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
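    # Added usage sketch (hedged): the same checkpoint can also be exercised through the
    # pipeline API; the task and checkpoint names are real, the snippet is illustrative.
    #
    #   from transformers import pipeline
    #   classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
    #   preds = classifier("sample_audio.flac")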
| 712 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
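# Added usage sketch (hedged): how the ONNX config is typically consumed; treat the exact
# constructor signature as an assumption.
#
#   onnx_config = CamembertOnnxConfig(CamembertConfig(), task="default")
#   print(onnx_config.inputs)  # OrderedDict of dynamic axes for input_ids / attention_mask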
| 390 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
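# Added note: _LazyModule defers the actual submodule imports until an attribute is first
# accessed, so importing the package stays cheap even with optional heavy dependencies.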
| 203 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
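# Added usage sketch (hedged): programmatic use, e.g. from a notebook before launching
# multi-GPU training:
#
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision="fp16")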
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
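# Added usage sketch: the equivalent CLI invocation is
#   accelerate config default --mixed_precision bf16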
| 292 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
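# Added worked example: all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
# yields the two segmentations [["purp", "le"], ["p", "ur", "p", "le"]] (order may vary);
# the function returns every ordered segmentation of the target, and words may be reused.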
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
    )
| 713 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
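# Added usage sketch (hedged): typical wiring with a torch optimizer; the model and step
# counts are illustrative only.
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=500, num_training_steps=10_000)
#   for step in range(10_000):
#       ...  # forward/backward
#       optimizer.step()
#       lr_scheduler.step()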
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 619 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
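# Added usage sketch (hedged): IIRFilter.process is assumed from audio_filters.iir_filter
# in this repo's layout; sample values are illustrative.
#
#   filt = make_lowpass(1000, 48000)              # 1 kHz cutoff at a 48 kHz sample rate
#   filtered = [filt.process(s) for s in samples]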
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 504 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
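        # Casting the scheduler's timesteps to the UNet parameter dtype keeps the
        # time embedding in the same precision as the sample (relevant under fp16).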
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio) | 504 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
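
# For example, "swin_base_patch4_window7_224" splits into
# ["swin", "base", "patch4", "window7", "224"]: name_split[1] selects the size
# variant ("base"), name_split[3][-1] the window size (7), and name_split[4]
# the input resolution (224).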
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
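
# e.g. "layers.0.blocks.0.attn.proj.weight" becomes
# "swin.encoder.layers.0.blocks.0.attention.output.dense.weight".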
def convert_state_dict(orig_state_dict, model):
    # Target key names below follow the Hugging Face Swin layout.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
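
# timm stores query/key/value as one fused "qkv" matrix of shape
# (3 * dim, dim) (and a fused bias of length 3 * dim); the slices above peel
# off the query, key and value blocks in that order.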
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 244 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 244 | 1 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Extra kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
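
# Illustrative CLI usage via fire (file name assumed):
#     python rouge_cli.py pred_summaries.txt gold_summaries.txt --save_path metrics.json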
if __name__ == "__main__":
fire.Fire(calculate_rouge_path) | 44 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # dtype choice assumed here: int32 inputs / int64 labels, matching
            # the convention of the GLUE TF datasets.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
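
# HANS ships as tab-separated files; per the indexing in _create_examples
# above, column 0 holds the label, columns 5 and 6 the premise and
# hypothesis, and column 7 the example id (sometimes prefixed with "ex").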
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 36 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 137 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
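
    # Multiprocessing is refused on TPU above likely because the TPU runtime
    # cannot be shared with a forked measurement process; on CPU/GPU a fresh
    # process per run is what makes the memory numbers reliable.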
| 137 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
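
        # The flat expected slice is plausible for this tiny seeded model: with a
        # single inference step and clip_sample_range=1.0 the rendered frames are
        # nearly constant, so every probed corner pixel lands on the same value.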
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 76 | """simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
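
# The fused attention weight in the CLAP audio tower has shape
# (3 * hidden, hidden); splitting it into thirds recovers query, key and
# value in that order, which is what the qkv branch above relies on.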
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 232 | 0 |
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
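
# With the default 1000-digit N and a 13-digit window this evaluates to
# 23514624000, the published Project Euler problem 8 answer.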
if __name__ == "__main__":
print(F'''{solution() = }''')
| 21 |
"""simple docstring"""
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
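
# Example: binary_xor(25, 32) == "0b111001"  (011001 ^ 100000 = 111001)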
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        # make all edge weights distinct so the minimum spanning tree is unique
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind(object):
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                # path compression: point directly at the root
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
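
        # Union by rank keeps the trees shallow: the smaller-rank root is
        # attached under the larger one, and ranks only grow when two
        # equal-rank trees merge; together with the path compression in
        # find(), lookups stay effectively constant-time.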
    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm for finding a minimum spanning tree."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst | 545 |
"""simple docstring"""
def A ( __snake_case: int = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
__magic_name__ = limit + 1
__magic_name__ = [0] * limit
for first_term in range(1 , __snake_case ):
for n in range(__snake_case , __snake_case , __snake_case ):
__magic_name__ = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also 4d < a
__magic_name__ = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""") | 545 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
snake_case_ : str = """backbone.""" if is_semantic else """"""
snake_case_ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : int=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
snake_case_ : Union[str, Any] = """backbone.""" if is_semantic else """"""
# queries, keys and values
snake_case_ : Union[str, Any] = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
snake_case_ : str = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
snake_case_ : Tuple = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
snake_case_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : List[Any] = q_bias
snake_case_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : Any = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case_ : Dict = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
snake_case_ : List[Any] = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
snake_case_ : int = gamma_a
snake_case_ : Any = gamma_a
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : str = dct.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = val
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ):
"""simple docstring"""
snake_case_ : Optional[Any] = False if """rvlcdip""" in checkpoint_url else True
snake_case_ : Optional[int] = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ : Any = 1_0_2_4
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : Any = 2_4
snake_case_ : Optional[int] = 1_6
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ : List[Any] = 1_6
snake_case_ : Tuple = """huggingface/label-files"""
snake_case_ : Tuple = """rvlcdip-id2label.json"""
snake_case_ : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ : str = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case_ : Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""]
snake_case_ : int = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
snake_case_ : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image
snake_case_ : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = prepare_img()
snake_case_ : Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
snake_case_ : Optional[int] = encoding["""pixel_values"""]
snake_case_ : int = model(SCREAMING_SNAKE_CASE__ )
snake_case_ : Optional[int] = outputs.logits
# verify logits
snake_case_ : Any = [1, 1_6] if """rvlcdip""" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
if has_lm_head:
snake_case_ : int = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
snake_case_ : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
a_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 48 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
if start < end:
snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : List[Any] = a[end]
snake_case_ : Dict = a[pivot]
snake_case_ : Any = temp
snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ )
return count
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
"""simple docstring"""
snake_case_ : Tuple = 0
snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : Dict = a[end]
snake_case_ : List[Any] = a[pivot]
snake_case_ : Optional[Any] = temp
snake_case_ : List[str] = start - 1
for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case_ : Any = new_pivot_index + 1
snake_case_ : Tuple = a[new_pivot_index]
snake_case_ : Optional[int] = a[index]
snake_case_ : Tuple = temp
snake_case_ : Union[str, Any] = a[new_pivot_index + 1]
snake_case_ : Union[str, Any] = a[end]
snake_case_ : Union[str, Any] = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 100 # 100 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
| 48 | 1 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
lowerCamelCase = """"""
lowerCamelCase = """"""
lowerCamelCase = """"""
lowerCamelCase = """"""
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =tweepy.OAuthHandler(_lowerCAmelCase , _lowerCAmelCase )
auth.set_access_token(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =tweepy.API(_lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
__lowercase =[]
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowercase =api.user_timeline(screen_name=_lowerCAmelCase , count=200 )
# save most recent tweets
alltweets.extend(_lowerCAmelCase )
# save the id of the oldest tweet less one
__lowercase =alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_lowerCAmelCase ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
__lowercase =api.user_timeline(
screen_name=_lowerCAmelCase , count=200 , max_id=_lowerCAmelCase )
# save most recent tweets
alltweets.extend(_lowerCAmelCase )
# update the id of the oldest tweet less one
__lowercase =alltweets[-1].id - 1
print(f"""...{len(_lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowercase =[[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
__lowercase =csv.writer(_lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 474 |
'''simple docstring'''
from collections.abc import Sequence
def _A ( _lowerCAmelCase = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
__lowercase =nums[0]
for i in range(1 , len(_lowerCAmelCase ) ):
__lowercase =nums[i]
__lowercase =max(_lowerCAmelCase , ans + num , _lowerCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase = int(input("""Enter number of elements : """).strip())
lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 474 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
_lowerCamelCase : List[str] = parser.parse_args()
if args.model_type == "bert":
_lowerCamelCase : Dict = BertForMaskedLM.from_pretrained(args.model_name)
_lowerCamelCase : Optional[Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
_lowerCamelCase : Dict = model.state_dict()
_lowerCamelCase : List[str] = {}
for w in ["word_embeddings", "position_embeddings"]:
_lowerCamelCase : List[Any] = state_dict[f"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
_lowerCamelCase : Union[str, Any] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
_lowerCamelCase : Optional[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_lowerCamelCase : Optional[Any] = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
_lowerCamelCase : List[Any] = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
_lowerCamelCase : str = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
_lowerCamelCase : List[Any] = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
_lowerCamelCase : Optional[Any] = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
_lowerCamelCase : Tuple = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
_lowerCamelCase : Any = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
_lowerCamelCase : Dict = state_dict[
f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
_lowerCamelCase : Any = state_dict['''cls.predictions.decoder.weight''']
_lowerCamelCase : Any = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_lowerCamelCase : Dict = state_dict[f"cls.predictions.transform.dense.{w}"]
_lowerCamelCase : str = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"N layers selected for distillation: {std_idx}")
print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 157 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_lowerCamelCase : List[Any] = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_lowerCamelCase : List[str] = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_lowerCamelCase : Optional[int] = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
"""simple docstring"""
def A_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token" ), id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token" ), id="sequence" ), id="references" ),
} ), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def A_ ( self : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : str, _UpperCAmelCase : Union[str, Any]=4, _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = compute_bleu(
reference_corpus=_UpperCAmelCase, translation_corpus=_UpperCAmelCase, max_order=_UpperCAmelCase, smooth=_UpperCAmelCase )
((SCREAMING_SNAKE_CASE__) ,(SCREAMING_SNAKE_CASE__) ,(SCREAMING_SNAKE_CASE__) ,(SCREAMING_SNAKE_CASE__) ,(SCREAMING_SNAKE_CASE__) ,(SCREAMING_SNAKE_CASE__)) : Union[str, Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 157 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_snake_case : Union[str, Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
_snake_case : Dict = None
def a_ ( ):
__lowerCAmelCase = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.' )
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.' )
parser.add_argument(
'--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh', '-t', type=lowerCAmelCase_, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
parser.add_argument(
'--out-image-dir', '-p', metavar='out_images', default=lowerCAmelCase_, help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose', '-v', action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a_ ( lowerCAmelCase_ : int ):
__lowerCAmelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCAmelCase = bool(qa['answers']['text'] )
return qid_to_has_ans
def a_ ( lowerCAmelCase_ : Dict ):
def remove_articles(lowerCAmelCase_ : Optional[int] ):
return ARTICLES_REGEX.sub(' ', lowerCAmelCase_ )
def white_space_fix(lowerCAmelCase_ : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase_ : int ):
__lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase_ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) )
def a_ ( lowerCAmelCase_ : List[Any] ):
if not s:
return []
return normalize_answer(lowerCAmelCase_ ).split()
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Any ):
return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) )
def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : List[str] ):
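    # SQuAD token-level F1: precision/recall over the multiset intersection of normalized gold and predicted tokens (degenerates to exact match when either side is empty).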
__lowerCAmelCase = get_tokens(lowerCAmelCase_ )
__lowerCAmelCase = get_tokens(lowerCAmelCase_ )
__lowerCAmelCase = collections.Counter(lowerCAmelCase_ ) & collections.Counter(lowerCAmelCase_ )
__lowerCAmelCase = sum(common.values() )
if len(lowerCAmelCase_ ) == 0 or len(lowerCAmelCase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCAmelCase = 1.0 * num_same / len(lowerCAmelCase_ )
__lowerCAmelCase = 1.0 * num_same / len(lowerCAmelCase_ )
__lowerCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = {}
__lowerCAmelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCAmelCase = qa['id']
__lowerCAmelCase = [t for t in qa['answers']['text'] if normalize_answer(lowerCAmelCase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCAmelCase = ['']
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
__lowerCAmelCase = preds[qid]
# Take max over all gold answers
__lowerCAmelCase = max(compute_exact(lowerCAmelCase_, lowerCAmelCase_ ) for a in gold_answers )
__lowerCAmelCase = max(compute_fa(lowerCAmelCase_, lowerCAmelCase_ ) for a in gold_answers )
return exact_scores, fa_scores
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Dict, lowerCAmelCase_ : int, lowerCAmelCase_ : Dict ):
__lowerCAmelCase = {}
for qid, s in scores.items():
__lowerCAmelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCAmelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCAmelCase = s
return new_scores
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : str, lowerCAmelCase_ : Tuple=None ):
if not qid_list:
__lowerCAmelCase = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
__lowerCAmelCase = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : str ):
for k in new_eval:
__lowerCAmelCase = new_eval[k]
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ):
plt.step(lowerCAmelCase_, lowerCAmelCase_, color='b', alpha=0.2, where='post' )
plt.fill_between(lowerCAmelCase_, lowerCAmelCase_, step='post', alpha=0.2, color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase_ )
plt.savefig(lowerCAmelCase_ )
plt.clf()
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Any, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Optional[Any]=None, lowerCAmelCase_ : int=None ):
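    # Precision-recall sweep for the no-answer decision: qids sorted by predicted no-answer probability act as candidate thresholds; average precision is accumulated along the way.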
    __lowerCAmelCase = sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : na_probs[lowerCAmelCase_] )
__lowerCAmelCase = 0.0
__lowerCAmelCase = 1.0
__lowerCAmelCase = 0.0
__lowerCAmelCase = [1.0]
__lowerCAmelCase = [0.0]
__lowerCAmelCase = 0.0
for i, qid in enumerate(lowerCAmelCase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCAmelCase = true_pos / float(i + 1 )
__lowerCAmelCase = true_pos / float(lowerCAmelCase_ )
if i == len(lowerCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase_ )
recalls.append(lowerCAmelCase_ )
if out_image:
plot_pr_curve(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
return {"ap": 100.0 * avg_prec}
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : Dict, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Any, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[Any] ):
if out_image_dir and not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
__lowerCAmelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCAmelCase = make_precision_recall_eval(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, out_image=os.path.join(lowerCAmelCase_, 'pr_exact.png' ), title='Precision-Recall curve for Exact Match score', )
__lowerCAmelCase = make_precision_recall_eval(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, out_image=os.path.join(lowerCAmelCase_, 'pr_f1.png' ), title='Precision-Recall curve for F1 score', )
__lowerCAmelCase = {k: float(lowerCAmelCase_ ) for k, v in qid_to_has_ans.items()}
__lowerCAmelCase = make_precision_recall_eval(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, out_image=os.path.join(lowerCAmelCase_, 'pr_oracle.png' ), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
merge_eval(lowerCAmelCase_, lowerCAmelCase_, 'pr_exact' )
merge_eval(lowerCAmelCase_, lowerCAmelCase_, 'pr_f1' )
merge_eval(lowerCAmelCase_, lowerCAmelCase_, 'pr_oracle' )
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Dict ):
if not qid_list:
return
__lowerCAmelCase = [na_probs[k] for k in qid_list]
__lowerCAmelCase = np.ones_like(lowerCAmelCase_ ) / float(len(lowerCAmelCase_ ) )
plt.hist(lowerCAmelCase_, weights=lowerCAmelCase_, bins=20, range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase_, F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : List[str], lowerCAmelCase_ : Tuple ):
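    # Threshold search: start from the score of predicting no-answer for everything, then walk qids in order of no-answer probability, keeping the threshold with the best running score.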
__lowerCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCAmelCase = num_no_ans
__lowerCAmelCase = cur_score
__lowerCAmelCase = 0.0
    __lowerCAmelCase = sorted(lowerCAmelCase_, key=lambda lowerCAmelCase_ : na_probs[lowerCAmelCase_] )
for i, qid in enumerate(lowerCAmelCase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCAmelCase = scores[qid]
else:
if preds[qid]:
__lowerCAmelCase = -1
else:
__lowerCAmelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCAmelCase = cur_score
__lowerCAmelCase = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase_ ), best_thresh
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : str, lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = find_best_thresh(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = find_best_thresh(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = best_exact
__lowerCAmelCase = exact_thresh
__lowerCAmelCase = best_fa
__lowerCAmelCase = fa_thresh
def a_ ( ):
with open(OPTS.data_file ) as f:
__lowerCAmelCase = json.load(lowerCAmelCase_ )
__lowerCAmelCase = dataset_json['data']
with open(OPTS.pred_file ) as f:
__lowerCAmelCase = json.load(lowerCAmelCase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCAmelCase = json.load(lowerCAmelCase_ )
else:
__lowerCAmelCase = {k: 0.0 for k in preds}
__lowerCAmelCase = make_qid_to_has_ans(lowerCAmelCase_ ) # maps qid to True/False
__lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCAmelCase = get_raw_scores(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = apply_no_ans_threshold(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, OPTS.na_prob_thresh )
__lowerCAmelCase = apply_no_ans_threshold(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, OPTS.na_prob_thresh )
__lowerCAmelCase = make_eval_dict(lowerCAmelCase_, lowerCAmelCase_ )
if has_ans_qids:
__lowerCAmelCase = make_eval_dict(lowerCAmelCase_, lowerCAmelCase_, qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_, lowerCAmelCase_, 'HasAns' )
if no_ans_qids:
__lowerCAmelCase = make_eval_dict(lowerCAmelCase_, lowerCAmelCase_, qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_, lowerCAmelCase_, 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase_, lowerCAmelCase_, OPTS.out_image_dir, 'hasAns' )
histogram_na_prob(lowerCAmelCase_, lowerCAmelCase_, OPTS.out_image_dir, 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file, 'w' ) as f:
json.dump(lowerCAmelCase_, lowerCAmelCase_ )
else:
print(json.dumps(lowerCAmelCase_, indent=2 ) )
if __name__ == "__main__":
_snake_case : Tuple = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 53 |
from copy import deepcopy
class snake_case__ :
"""simple docstring"""
def __init__( self : Union[str, Any], _snake_case : list[int] | None = None, _snake_case : int | None = None ) ->None:
if arr is None and size is not None:
snake_case__ : Optional[Any] = size
snake_case__ : Tuple = [0] * size
elif arr is not None:
self.init(_snake_case )
else:
raise ValueError('Either arr or size must be specified' )
def lowercase_ ( self : Tuple, _snake_case : list[int] ) ->None:
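        # O(n) construction: copy the values, then fold each position's partial sum into its parent at next_(i), avoiding n separate point updates.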
snake_case__ : Optional[int] = len(_snake_case )
snake_case__ : Tuple = deepcopy(_snake_case )
for i in range(1, self.size ):
snake_case__ : Any = self.next_(_snake_case )
if j < self.size:
self.tree[j] += self.tree[i]
def lowercase_ ( self : Dict ) ->list[int]:
snake_case__ : Any = self.tree[:]
for i in range(self.size - 1, 0, -1 ):
snake_case__ : List[str] = self.next_(_snake_case )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowercase_ ( _snake_case : int ) ->int:
return index + (index & (-index))
@staticmethod
def lowercase_ ( _snake_case : int ) ->int:
return index - (index & (-index))
def lowercase_ ( self : int, _snake_case : int, _snake_case : int ) ->None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ : List[Any] = self.next_(_snake_case )
def lowercase_ ( self : Dict, _snake_case : int, _snake_case : int ) ->None:
self.add(_snake_case, value - self.get(_snake_case ) )
def lowercase_ ( self : Dict, _snake_case : int ) ->int:
if right == 0:
return 0
snake_case__ : int = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ : Tuple = self.prev(_snake_case )
return result
def lowercase_ ( self : Dict, _snake_case : int, _snake_case : int ) ->int:
return self.prefix(_snake_case ) - self.prefix(_snake_case )
def lowercase_ ( self : Any, _snake_case : int ) ->int:
return self.query(_snake_case, index + 1 )
def lowercase_ ( self : Optional[int], _snake_case : int ) ->int:
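        # Rank search via binary lifting: descends powers of two to find the largest index whose prefix sum does not exceed value (-1 if value is below the entry stored at index 0).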
value -= self.tree[0]
if value < 0:
return -1
snake_case__ : Union[str, Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 478 | 0 |
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowerCamelCase( *UpperCamelCase__ : Dict ) -> List[Any]:
with open(UpperCamelCase__ , '''r''' ) as fh:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_EX )
try:
print(*UpperCamelCase__ )
finally:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_UN )
snake_case_ = int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
snake_case_ = torch.device("""cuda""", local_rank)
snake_case_ = socket.gethostname()
snake_case_ = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
snake_case_ = dist.get_rank()
snake_case_ = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 707 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _lowerCamelCase( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
A : Any = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A : List[Any] = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A : List[Any] = F'''{src_lang}-{tgt_lang}'''
A : Any = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A : Optional[int] = os.path.join(UpperCamelCase__ , '''README.md''' )
print(F'''Generating {path}''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
snake_case_ = Path(__file__).resolve().parent.parent.parent
snake_case_ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case_ , snake_case_ , snake_case_ = model_name.split("""-""")
snake_case_ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 537 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCamelCase ( _A, _A, _A, _A, _A=True, _A="pt" ):
"""simple docstring"""
__magic_name__ : Any = {"""add_prefix_space""": True} if isinstance(_A, _A ) and not line.startswith(""" """ ) else {}
__magic_name__ : List[str] = padding_side
return tokenizer(
[line], max_length=_A, padding="""max_length""" if pad_to_max_length else None, truncation=_A, return_tensors=_A, add_special_tokens=_A, **_A, )
def UpperCamelCase ( _A, _A, _A=None, ):
"""simple docstring"""
__magic_name__ : List[str] = input_ids.ne(_A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="train" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="" , ) -> List[str]:
super().__init__()
__magic_name__ : List[Any] = Path(lowerCAmelCase__ ).joinpath(type_path + """.source""" )
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase__ ).joinpath(type_path + """.target""" )
__magic_name__ : List[str] = self.get_char_lens(self.src_file )
__magic_name__ : Tuple = max_source_length
__magic_name__ : Optional[int] = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
__magic_name__ : Dict = tokenizer
__magic_name__ : Optional[Any] = prefix
if n_obs is not None:
__magic_name__ : Dict = self.src_lens[:n_obs]
__magic_name__ : Optional[Any] = src_lang
__magic_name__ : Any = tgt_lang
def __len__( self ) -> Union[str, Any]:
return len(self.src_lens )
def __getitem__( self , lowerCAmelCase__ ) -> Dict[str, torch.Tensor]:
__magic_name__ : Any = index + 1 # linecache starts at 1
__magic_name__ : str = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase__ ).rstrip("""\n""" )
__magic_name__ : Optional[int] = linecache.getline(str(self.tgt_file ) , lowerCAmelCase__ ).rstrip("""\n""" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__magic_name__ : Union[str, Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
)
__magic_name__ : Dict = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
__magic_name__ : Tuple = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , """right""" )
__magic_name__ : Optional[int] = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , """right""" )
__magic_name__ : List[str] = source_inputs["""input_ids"""].squeeze()
__magic_name__ : int = target_inputs["""input_ids"""].squeeze()
__magic_name__ : str = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __magic_name__ ( lowerCAmelCase__ ) -> int:
return [len(lowerCAmelCase__ ) for x in Path(lowerCAmelCase__ ).open().readlines()]
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict[str, torch.Tensor]:
__magic_name__ : Union[str, Any] = torch.stack([x["""input_ids"""] for x in batch] )
__magic_name__ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
__magic_name__ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
__magic_name__ : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
__magic_name__ : Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
__magic_name__ : int = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : List[str] = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
__magic_name__ : Any = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__: Tuple = getLogger(__name__)
def UpperCamelCase ( _A ):
"""simple docstring"""
return list(itertools.chain.from_iterable(_A ) )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[str] = get_git_info()
save_json(_A, os.path.join(_A, """git_log.json""" ) )
def UpperCamelCase ( _A, _A, _A=4, **_A ):
"""simple docstring"""
with open(_A, """w""" ) as f:
json.dump(_A, _A, indent=_A, **_A )
def UpperCamelCase ( _A ):
"""simple docstring"""
with open(_A ) as f:
return json.load(_A )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = git.Repo(search_parent_directories=_A )
__magic_name__ : Union[str, Any] = {
"""repo_id""": str(_A ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
return list(map(_A, _A ) )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
with open(_A, """wb""" ) as f:
return pickle.dump(_A, _A )
def UpperCamelCase ( _A ):
"""simple docstring"""
def remove_articles(_A ):
return re.sub(R"""\b(a|an|the)\b""", """ """, _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
__magic_name__ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : str = normalize_answer(_A ).split()
__magic_name__ : Union[str, Any] = normalize_answer(_A ).split()
__magic_name__ : str = Counter(_A ) & Counter(_A )
__magic_name__ : str = sum(common.values() )
if num_same == 0:
return 0
__magic_name__ : Tuple = 1.0 * num_same / len(_A )
__magic_name__ : List[Any] = 1.0 * num_same / len(_A )
__magic_name__ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
return normalize_answer(_A ) == normalize_answer(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
assert len(_A ) == len(_A )
__magic_name__ : Any = 0
for hypo, pred in zip(_A, _A ):
em += exact_match_score(_A, _A )
if len(_A ) > 0:
em /= len(_A )
return {"em": em}
def UpperCamelCase ( _A ):
"""simple docstring"""
return model_prefix.startswith("""rag""" )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__magic_name__ : List[str] = """dropout_rate"""
for p in extra_params:
if getattr(_A, _A, _A ):
if not hasattr(_A, _A ) and not hasattr(_A, equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(_A ) )
delattr(_A, _A )
continue
__magic_name__ : Optional[Any] = p if hasattr(_A, _A ) else equivalent_param[p]
setattr(_A, _A, getattr(_A, _A ) )
delattr(_A, _A )
return hparams, config
| 324 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=0.0_2 , ) -> int:
__magic_name__ : List[str] = parent
__magic_name__ : Optional[Any] = batch_size
__magic_name__ : Any = seq_length
__magic_name__ : str = is_training
__magic_name__ : List[str] = use_input_mask
__magic_name__ : Union[str, Any] = use_token_type_ids
__magic_name__ : List[str] = use_labels
__magic_name__ : Union[str, Any] = vocab_size
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : List[Any] = rotary_dim
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Optional[int] = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : List[Any] = initializer_range
__magic_name__ : Optional[int] = None
__magic_name__ : List[Any] = vocab_size - 1
__magic_name__ : Any = vocab_size - 1
__magic_name__ : List[Any] = vocab_size - 1
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Any = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __magic_name__ ( self ) -> str:
__magic_name__ : str = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : int = config_and_inputs
__magic_name__ : Any = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
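        # Cache consistency check: run all but the last token with a fresh init_cache, feed the final token against the cached key/values, and require the logits to match a full forward pass within 1e-3.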
__magic_name__ : List[str] = 20
__magic_name__ : Dict = model_class_name(lowerCAmelCase__ )
__magic_name__ : Optional[int] = model.init_cache(input_ids.shape[0] , lowerCAmelCase__ )
__magic_name__ : List[str] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__magic_name__ : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__magic_name__ : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
__magic_name__ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
__magic_name__ : str = model(
input_ids[:, -1:] , attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase__ , )
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : Any = 20
__magic_name__ : Tuple = model_class_name(lowerCAmelCase__ )
__magic_name__ : int = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__magic_name__ : Union[str, Any] = model.init_cache(input_ids.shape[0] , lowerCAmelCase__ )
__magic_name__ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__magic_name__ : Dict = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
__magic_name__ : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
__magic_name__ : Union[str, Any] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
__magic_name__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : List[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase__ : List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : List[str] = FlaxGPTJModelTester(self )
def __magic_name__ ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
__magic_name__ ,__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__magic_name__ ,__magic_name__ ,__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@tooslow
def __magic_name__ ( self ) -> int:
__magic_name__ : Any = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
__magic_name__ : Optional[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )
__magic_name__ : int = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
__magic_name__ : str = False
__magic_name__ : str = model.config.eos_token_id
__magic_name__ : Optional[int] = jax.jit(model.generate )
__magic_name__ : Tuple = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
__magic_name__ : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__magic_name__ : Any = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 324 | 1 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
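
# Illustrative sketch (not part of the original module): how the two helpers
# above compose. The layer count and device ids here are invented for the example.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
    # -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
    print(device_map)
    assert_device_map(device_map, num_blocks=12)  # passes: every block on exactly one device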
| 710 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
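        # Note (added for clarity): the slice comparison above checks that incremental
        # decoding with `past_key_values` reproduces the same logits as a single full
        # forward pass over the concatenated sequence.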
def prepare_blenderbot_small_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 292 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 69 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self, prediction_length: Optional[int] = None, context_length: Optional[int] = None,
        distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0,
        num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0,
        num_static_real_features: int = 0, cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2,
        encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu",
        dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100,
        init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10,
        moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 69 | 1 |
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
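
# Illustrative sketch (not part of the original module): wiring a converter into
# an interactive prompt. The question text below is invented for the example.
# mixed_precision = _ask_options(
#     "Do you wish to use FP16 or BF16 (mixed precision)?",
#     ["no", "fp16", "bf16", "fp8"],
#     _convert_mixed_precision,
# )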
| 707 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 371 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
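        # Note (added for clarity): tf_top_k_top_p_filtering keeps, per row, only
        # the logits that survive both filters -- here the top-10 candidates whose
        # cumulative softmax mass stays within top_p=0.6, with at least
        # min_tokens_to_keep=4 retained -- and sets every other logit to -inf,
        # which is why exactly 5 indices per row remain above.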
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 91 |
"""simple docstring"""
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypts text using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypts text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
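
# Quick sanity check (illustrative, not part of the original module): decrypt
# inverts encrypt because c = (p + k) * k = p*k + k**2, so p = (c - k**2) / k
# for each character.
# >>> c, k = Onepad.encrypt("abc")
# >>> Onepad.decrypt(c, k)
# 'abc'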
| 617 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
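
# Illustrative usage sketch (not part of the original module); the column names
# below are invented for the example.
# template = QuestionAnsweringExtractive(question_column="q", context_column="passage")
# template.column_mapping  # {"q": "question", "passage": "context", "answers": "answers"}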
| 714 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
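
# Worked example (illustrative): for [1, 2, 3] the pair
# (max_including, max_excluding) evolves (1, 0) -> (2, 1) -> (4, 2),
# so the result is max(4, 2) = 4, i.e. picking 1 and 3.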
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """
    MGP-STR tokenizer: character-level tokenization backed by a json vocab file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 174 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Returns True if ``pattern`` occurs in ``text`` using Rabin-Karp hashing."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False
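
# Illustrative rolling-hash step (not part of the original module): with
# alphabet_size = 256, hashing "ab" gives ord('a')*256 + ord('b') (mod modulus).
# Sliding one character to "bc" drops 'a' (subtract ord('a') * modulus_power,
# here 256), rescales by the base, and appends 'c' -- exactly the update in the
# loop above, done in O(1) per shift.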
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 372 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
A_ : List[str] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
A_ : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
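# Example invocation (a sketch; the script name and dump path are placeholders):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl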
| 32 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
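# Minimal usage sketch (not part of the original module): instantiating the
# default configuration defined above.
#   >>> config = LiltConfig()
#   >>> config.channel_shrink_ratio
#   4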
| 32 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head-masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 148 |
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a single 1 bit, zeros, and the 64-bit big-endian message length
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0_0_0_0_0_0_0_0

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0_0_0_0_0_0_0_0
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0_0_0_0_0_0_0_0

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0_0_0_0_0_0_0_0),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash either the --string argument or the contents of --file and print the digest."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
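# Minimal usage sketch (mirrors the unit test above):
#   >>> import hashlib
#   >>> SHA256(b"hello").hash == hashlib.sha256(b"hello").hexdigest()
#   True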
| 290 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 127 |
def min_path_sum(grid: list) -> int:
    """
    Find the path from the top-left to the bottom-right of a grid of numbers
    with the lowest possible sum, and return that sum.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
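    # A minimal usage sketch: for this grid the cheapest top-left -> bottom-right
    # path is 1 -> 3 -> 1 -> 1 -> 1, with sum 7.
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    print(min_path_sum(example_grid))  # 7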
| 127 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : Union[str, Any] ) -> Dict:
if exponent == 1:
return base
if exponent % 2 == 0:
_SCREAMING_SNAKE_CASE : Dict = _modexpt(a__, exponent // 2, a__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(a__, exponent - 1, a__ )) % modulo_value
def _lowerCAmelCase ( lowerCamelCase__ : str = 1_7_7_7, lowerCamelCase__ : Any = 1_8_5_5, lowerCamelCase__ : List[str] = 8 ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = base
for _ in range(1, a__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = _modexpt(a__, a__, 1_0**digits )
return result
if __name__ == "__main__":
print(F'{solution() = }')
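    # Worked example of the recurrence: solution(base=3, height=3, digits=8)
    # first computes 3**3 = 27, then 3**27 mod 10**8; since 3**27 = 7625597484987,
    # it returns 97484987 (the last 8 digits).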
| 572 |
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, by trial division up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count the primes below `max_prime` that are differences of two consecutive
    cubes.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
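# Note: the candidates 7, 19, 37, 61, ... are the differences of consecutive
# cubes, (x + 1)**3 - x**3 = 3*x**2 + 3*x + 1, which is why the gap between
# successive candidates grows by 6 on each iteration.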
if __name__ == "__main__":
print(f"""{solution() = }""")
| 627 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # check that instantiated activations are distinct objects
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 302 |
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 302 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 625 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 186 | 0 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
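# Typical invocation (a sketch; the script and file names are placeholders):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json -o eval.json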
| 485 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model, tie its weights, then check which parameters are tied
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
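# Minimal usage sketch (an illustration, not shown in this file): these helpers
# are normally driven through `from_pretrained` rather than called directly, e.g.
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   model = AutoModelForCausalLM.from_pretrained(
#       "facebook/opt-350m", quantization_config=BitsAndBytesConfig(load_in_8bit=True)
#   )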
| 485 | 1 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """DFS traversal that consumes each edge once, building an Eulerian traversal."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node  # Euler circuit
    if odd_degree_nodes == 2:
        return 2, odd_node  # Euler path (must start from an odd-degree node)
    return 3, odd_node  # neither


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 57 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 472 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( lowerCamelCase__ ):
def A__ ( self :Optional[Any] , __snake_case :Optional[int] ):
'''simple docstring'''
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__magic_name__ : Any ={'''source''': '''What is love ?''', '''target''': '''life'''}
__magic_name__ : Union[str, Any] ={'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__magic_name__ : Dict ='''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowerCamelCase , f"{split}.{field}" ) , """w""" ) as f:
f.write(__lowerCamelCase )
def A__ ( self :Optional[Any] , __snake_case :Optional[Any] , __snake_case :Optional[int] = "pytorch" ):
'''simple docstring'''
__magic_name__ : Dict =self.get_auto_remove_tmp_dir()
__magic_name__ : str =os.path.join(__lowerCamelCase , """output""" )
__magic_name__ : Optional[Any] =os.path.join(__lowerCamelCase , """data""" )
self._create_dummy_data(data_dir=__lowerCamelCase )
__magic_name__ : Dict =f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
__magic_name__ : str =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowerCamelCase , env=self.get_env() )
__magic_name__ : Optional[int] =os.path.join(__lowerCamelCase , """metrics.json""" )
with open(__lowerCamelCase ) as f:
__magic_name__ : Dict =json.load(__lowerCamelCase )
return result
@require_torch_gpu
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : List[Any] =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : List[str] =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Dict =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 702 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , ):
__magic_name__ : Optional[int] ={}
if train_file is not None:
__magic_name__ : Optional[int] =[train_file]
if eval_file is not None:
__magic_name__ : Any =[eval_file]
if test_file is not None:
__magic_name__ : int =[test_file]
__magic_name__ : Any =datasets.load_dataset("""csv""" , data_files=lowerCamelCase )
__magic_name__ : Optional[Any] =list(ds[list(files.keys() )[0]].features.keys() )
__magic_name__ : Optional[Any] =features_name.pop(lowerCamelCase )
__magic_name__ : str =list(set(ds[list(files.keys() )[0]][label_name] ) )
__magic_name__ : Union[str, Any] ={label: i for i, label in enumerate(lowerCamelCase )}
__magic_name__ : Dict =tokenizer.model_input_names
__magic_name__ : Any ={}
if len(lowerCamelCase ) == 1:
for k in files.keys():
__magic_name__ : Dict =ds[k].map(
lambda lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" ) , batched=lowerCamelCase , )
elif len(lowerCamelCase ) == 2:
for k in files.keys():
__magic_name__ : Optional[Any] =ds[k].map(
lambda lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" , ) , batched=lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__magic_name__ : Any ={k: v for k, v in ex.items() if k in input_names}
__magic_name__ : Any =labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__magic_name__ : Dict ={k: v for k, v in ex.items() if k in input_names}
__magic_name__ : str =labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__magic_name__ : Union[str, Any] ={k: v for k, v in ex.items() if k in input_names}
__magic_name__ : Optional[int] =labelaid[ex[label_name]]
yield (d, label)
__magic_name__ : Union[str, Any] =(
tf.data.Dataset.from_generator(
lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__magic_name__ : Optional[Any] =train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__magic_name__ : Optional[Any] =(
tf.data.Dataset.from_generator(
lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__magic_name__ : Any =val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__magic_name__ : Any =(
tf.data.Dataset.from_generator(
lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__magic_name__ : Optional[int] =test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
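# A minimal, self-contained sketch of the generator -> tf.data pattern used in
# the loader above: each example yields (features_dict, label), and the output
# types/shapes mirror ({name: tf.int32}, tf.int32). The feature name and values
# here are illustrative, not taken from a real tokenizer.
def _sketch_tf_dataset():
    import tensorflow as tf

    def gen():
        yield ({"input_ids": [101, 2023, 102]}, 1)

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int32),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )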
UpperCAmelCase_ : int = logging.getLogger(__name__)
@dataclass
class __A :
UpperCamelCase = field(metadata={"""help""": """Which column contains the label"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """The path of the training file"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """The path of the development file"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """The path of the test file"""} )
UpperCamelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __A :
UpperCamelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(default=UpperCamelCase__ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__ : List[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ : Dict =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__magic_name__ : Any =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCamelCase ) , labelaid=lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__magic_name__ : Any =TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCamelCase ) -> Dict:
__magic_name__ : Tuple =np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__magic_name__ : int =TFTrainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__magic_name__ : List[str] ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__magic_name__ : List[str] =trainer.evaluate()
__magic_name__ : Optional[Any] =os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 367 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""input_features""", """attention_mask"""]
def __init__( self : Any , a_ : Optional[Any]=80 , a_ : Optional[Any]=16_000 , a_ : Tuple=80 , a_ : int=0.0 , a_ : List[Any]=True , a_ : Union[str, Any]=True , a_ : List[Any]=True , **a_ : List[Any] , ):
"""simple docstring"""
super().__init__(feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ )
__snake_case = num_mel_bins
__snake_case = do_ceptral_normalize
__snake_case = normalize_means
__snake_case = normalize_vars
__snake_case = True
def A ( self : Tuple , a_ : np.ndarray , ):
"""simple docstring"""
__snake_case = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__snake_case = torch.from_numpy(a_ ).unsqueeze(0 )
__snake_case = ta_kaldi.fbank(a_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A ( a_ : np.ndarray , a_ : int , a_ : Optional[bool] = True , a_ : Optional[bool] = True , a_ : float = 0.0 , ):
"""simple docstring"""
if normalize_means:
__snake_case = x[:input_length].mean(axis=0 )
__snake_case = np.subtract(a_ , a_ )
if normalize_vars:
__snake_case = x[:input_length].std(axis=0 )
__snake_case = np.divide(a_ , a_ )
if input_length < x.shape[0]:
__snake_case = padding_value
# make sure array is in float32
__snake_case = x.astype(np.floataa )
return x
def A ( self : Union[str, Any] , a_ : List[np.ndarray] , a_ : Optional[np.ndarray] = None ):
"""simple docstring"""
__snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(a_ , a_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(a_ , a_ )
]
def __call__( self : List[Any] , a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Optional[int] = None , a_ : bool = False , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[int] = None , a_ : Optional[bool] = None , **a_ : Union[str, Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__snake_case = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__snake_case = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case = [np.asarray(a_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
__snake_case = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case = [raw_speech]
# extract fbank features
__snake_case = [self._extract_fbank_features(a_ ) for waveform in raw_speech]
# convert into correct format for padding
__snake_case = BatchFeature({"input_features": features} )
__snake_case = self.pad(
a_ , padding=a_ , max_length=a_ , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , **a_ , )
# make sure list is in array format
__snake_case = padded_inputs.get("input_features" )
if isinstance(input_features[0] , a_ ):
__snake_case = [np.asarray(a_ , dtype=np.floataa ) for feature in input_features]
__snake_case = padded_inputs.get("attention_mask" )
if attention_mask is not None:
__snake_case = [np.asarray(a_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__snake_case = (
np.array(a_ , dtype=np.intaa )
if self._get_padding_strategies(a_ , max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__snake_case = self.normalize(
padded_inputs["input_features"] , attention_mask=a_ )
if return_tensors is not None:
__snake_case = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
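# A standalone NumPy sketch of the utterance-level CMVN implemented above:
# subtract the per-feature mean and divide by the per-feature std computed over
# the first `n_valid` (non-padded) frames, then restore the padding value.
# `n_valid` is an illustrative name for the attention-mask sum.
def _cmvn_sketch(x, n_valid, padding_value=0.0):
    import numpy as np

    x = np.subtract(x, x[:n_valid].mean(axis=0))  # normalize means
    x = np.divide(x, x[:n_valid].std(axis=0))     # normalize variances
    x[n_valid:] = padding_value                   # keep padded frames at the pad value
    return x.astype(np.float32)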
| 69 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def A ( snake_case__ ):
'''simple docstring'''
def decorator(snake_case__ ):
SCREAMING_SNAKE_CASE__ = getattr(snake_case__ , """handle_key""" , [] )
handle += [key]
setattr(snake_case__ , """handle_key""" , snake_case__ )
return func
return decorator
def A ( *snake_case__ ):
'''simple docstring'''
def decorator(snake_case__ ):
SCREAMING_SNAKE_CASE__ = getattr(snake_case__ , """handle_key""" , [] )
handle += keys
setattr(snake_case__ , """handle_key""" , snake_case__ )
return func
return decorator
class lowerCamelCase (A__ ):
def __new__( cls : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = super().__new__(cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if not hasattr(__UpperCAmelCase , """key_handler""" ):
setattr(__UpperCAmelCase , """key_handler""" , {} )
setattr(__UpperCAmelCase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
SCREAMING_SNAKE_CASE__ = getattr(__UpperCAmelCase , """handle_key""" , [] )
for key in handled_keys:
SCREAMING_SNAKE_CASE__ = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = get_character()
if char != KEYMAP["undefined"]:
SCREAMING_SNAKE_CASE__ = ord(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = cls.key_handler.get(__UpperCAmelCase )
if handler:
SCREAMING_SNAKE_CASE__ = char
return handler(cls )
else:
return None
def A ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
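# Hypothetical usage sketch of the machinery above. The two module-level
# decorators both carry the name `A` here; call them register_key and
# register_keys for illustration:
#
#   class Menu(metaclass=KeyHandler):
#       @register_key("up")
#       def move_up(cls): ...
#
#   Menu.handle_input()  # reads one key and dispatches to the matching handler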
| 196 | 0 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCamelCase = get_logger(__name__)
class __lowerCamelCase ( enum.Enum ):
"""simple docstring"""
snake_case__ = "all_checks"
snake_case__ = "basic_checks"
snake_case__ = "no_checks"
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def _A ( lowerCAmelCase_ : Optional[dict] , lowerCAmelCase_ : dict , lowerCAmelCase_ : Any=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) )
if len(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) )
lowerCAmelCase__ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
lowerCAmelCase__ = " for " + verification_name if verification_name is not None else ""
if len(lowerCAmelCase_ ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def _A ( lowerCAmelCase_ : Optional[dict] , lowerCAmelCase_ : dict ):
"""simple docstring"""
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) > 0:
raise ExpectedMoreSplits(str(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) )
if len(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) > 0:
raise UnexpectedSplits(str(set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) ) )
lowerCAmelCase__ = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowerCAmelCase_ ) > 0:
raise NonMatchingSplitsSizesError(str(lowerCAmelCase_ ) )
logger.info("All the splits matched successfully." )
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : bool = True ):
"""simple docstring"""
if record_checksum:
lowerCAmelCase__ = shaaaa()
with open(lowerCAmelCase_ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(lowerCAmelCase_ )
lowerCAmelCase__ = m.hexdigest()
else:
lowerCAmelCase__ = None
return {"num_bytes": os.path.getsize(lowerCAmelCase_ ), "checksum": checksum}
def _A ( lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
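# A standalone sketch of the streaming-hash idiom used in the checksum helper
# above: reading in 1 MiB chunks keeps memory constant for arbitrarily large
# files. The path argument is a placeholder.
def _sha256_of_file(path):
    from hashlib import sha256

    m = sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return m.hexdigest()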
| 125 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> str:
# in NER datasets, the last column is usually reserved for the NER label
lowerCAmelCase__ = label_idx
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = mode.value
lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = []
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
guid_index += 1
lowerCAmelCase__ = []
lowerCAmelCase__ = []
else:
lowerCAmelCase__ = line.split(" " )
words.append(splits[0] )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
return examples
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> Dict:
lowerCAmelCase__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(SCREAMING_SNAKE_CASE__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(SCREAMING_SNAKE_CASE__ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
lowerCAmelCase__ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def __init__( self : Dict ) -> List[str]:
# in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
lowerCAmelCase__ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = mode.value
lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = []
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
guid_index += 1
return examples
def a ( self : int , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> int:
lowerCAmelCase__ = 0
for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase__ = preds_list[example_id]
lowerCAmelCase__ = ""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(SCREAMING_SNAKE_CASE__ )
example_id += 1
def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
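# For reference, a CoNLL-2003-style slice that the first reader above parses
# (token in column 0, NER tag in the last column, label_idx=-1):
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   German JJ B-NP B-MISC
#
# This yields one InputExample with words=["EU", "rejects", "German"] and
# labels=["B-ORG", "O", "B-MISC"].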
| 125 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class UpperCAmelCase_ ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , **__A ):
"""simple docstring"""
super().__init__(**a_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __A , **__A ):
"""simple docstring"""
return super().__call__(a_ , **a_ )
def _snake_case ( self , **__A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = {}
if "candidate_labels" in kwargs:
lowerCamelCase : Tuple = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowerCamelCase : Optional[Any] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _snake_case ( self , __A , __A=None , __A="This is a photo of {}." ):
"""simple docstring"""
lowerCamelCase : Dict = load_image(a_ )
lowerCamelCase : str = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCamelCase : Any = candidate_labels
lowerCamelCase : Union[str, Any] = [hypothesis_template.format(a_ ) for x in candidate_labels]
lowerCamelCase : Tuple = self.tokenizer(a_ , return_tensors=self.framework , padding=a_ )
lowerCamelCase : Tuple = [text_inputs]
return inputs
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = model_inputs.pop("candidate_labels" )
lowerCamelCase : str = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , a_ ):
lowerCamelCase : Tuple = text_inputs[0]
else:
# Batching case.
lowerCamelCase : Tuple = text_inputs[0][0]
lowerCamelCase : str = self.model(**a_ , **a_ )
lowerCamelCase : str = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = model_outputs.pop("candidate_labels" )
lowerCamelCase : List[Any] = model_outputs["logits"][0]
if self.framework == "pt":
lowerCamelCase : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCamelCase : Union[str, Any] = probs.tolist()
if not isinstance(a_ , a_ ):
lowerCamelCase : str = [scores]
elif self.framework == "tf":
lowerCamelCase : Dict = stable_softmax(a_ , axis=-1 )
lowerCamelCase : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCamelCase : int = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(a_ , a_ ) , key=lambda __A : -x[0] )
]
return result
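# Hedged usage sketch of this pipeline through the high-level factory; the
# checkpoint and image URL are placeholders and the scores are illustrative:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("https://example.com/cat.png",
#              candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")
#   # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]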
| 340 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# bit_count is the number of bits in the Gray code
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
__snake_case = gray_code_sequence_string(_UpperCAmelCase )
# convert the bit strings back to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case = int(sequence[i] , 2 )
return sequence
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either bit_count = 0 or bit_count = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case = gray_code_sequence_string(bit_count - 1 )
__snake_case = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case = "0" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case = "1" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
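# A compact cross-check for the recursive construction above: the n-th Gray
# code is n ^ (n >> 1), so for bit_count=2 both methods yield [0, 1, 3, 2].
def _gray_code_reference(bit_count):
    return [n ^ (n >> 1) for n in range(1 << bit_count)]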
| 69 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def a__ ( ) -> str:
UpperCAmelCase__ : Any = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : List[str] = g.get_repo('''huggingface/diffusers''' )
UpperCAmelCase__ : str = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : str = sorted(issue.get_comments() , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowercase__ )
UpperCAmelCase__ : List[str] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
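# Timeline of the policy implemented above, using the hard-coded thresholds:
# once an issue is at least 30 days old and inactive for 23 days, the bot
# comments and labels it "stale"; a reply from anyone other than the bot strips
# the label and keeps the issue open; 7 more days of silence after the bot's
# comment closes it.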
| 719 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase__ = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase__ : str = parser.parse_args()
return args.f
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : str = os.path.join(lowerCAmelCase__ , '''all_results.json''' )
if os.path.exists(lowerCAmelCase__ ):
with open(lowerCAmelCase__ , '''r''' ) as f:
UpperCAmelCase__ : int = json.load(lowerCAmelCase__ )
else:
raise ValueError(F"""can't find {path}""" )
return results
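# For context: get_results() expects the all_results.json written by the
# example scripts — a flat mapping of metric names to values, e.g. (values
# illustrative): {"eval_accuracy": 0.75, "train_loss": 0.4, "perplexity": 35.0}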
def a__ ( ) -> Dict:
UpperCAmelCase__ : Dict = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
UpperCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase_ ( __a ):
@classmethod
def lowercase_ ( cls : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[Any] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase_ ( cls : str ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Optional[int] = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Any = get_results(_A )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : int = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model, and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Tuple = get_results(_A )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Any = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Union[str, Any] = get_results(_A )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase__ : Optional[int] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : str = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Optional[Any] = get_results(_A )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : int = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : str = get_results(_A )
# Because we use --version_2_with_negative, the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Any = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : str = get_results(_A )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_A , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Union[str, Any] = get_results(_A )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : int = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Optional[Any] = get_results(_A )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''translation_no_trainer''' ) ) )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_A )
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : List[Any] = get_results(_A )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Tuple = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(_A )
# The base model scores about 25% accuracy.
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_A , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''image_classification_no_trainer''' ) ) )
| 312 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
snake_case : str = logging.get_logger(__name__)
def lowercase__ ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ):
'''simple docstring'''
__lowercase = WavaVecaForSequenceClassification.from_pretrained(A_ , config=A_ )
__lowercase = downstream_dict["""projector.weight"""]
__lowercase = downstream_dict["""projector.bias"""]
__lowercase = downstream_dict["""model.post_net.linear.weight"""]
__lowercase = downstream_dict["""model.post_net.linear.bias"""]
return model
def lowercase__ ( __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
__lowercase = WavaVecaForAudioFrameClassification.from_pretrained(A_ , config=A_ )
__lowercase = downstream_dict["""model.linear.weight"""]
__lowercase = downstream_dict["""model.linear.bias"""]
return model
def lowercase__ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = WavaVecaForXVector.from_pretrained(A_ , config=A_ )
__lowercase = downstream_dict["""connector.weight"""]
__lowercase = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowercase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__lowercase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
__lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
__lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
__lowercase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
__lowercase = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def lowercase__ ( __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = torch.load(A_ , map_location="""cpu""" )
__lowercase = checkpoint["""Downstream"""]
__lowercase = WavaVecaConfig.from_pretrained(A_ )
__lowercase = WavaVecaFeatureExtractor.from_pretrained(
A_ , return_attention_mask=A_ , do_normalize=A_ )
__lowercase = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
__lowercase = convert_classification(A_ , A_ , A_ )
elif arch.endswith("""ForAudioFrameClassification""" ):
__lowercase = convert_diarization(A_ , A_ , A_ )
elif arch.endswith("""ForXVector""" ):
__lowercase = convert_xvector(A_ , A_ , A_ )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__lowercase = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
snake_case : List[Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
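# Example invocation (the script name and all paths below are placeholders):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model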
| 566 |
'''simple docstring'''
from __future__ import annotations
def A__ ( A_ ) -> list[int]: # This function is recursive
_lowercase = len(A_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
_lowercase = array[0]
_lowercase = False
_lowercase = 1
_lowercase = []
while not is_found and i < array_length:
if array[i] < pivot:
_lowercase = True
_lowercase = [element for element in array[i:] if element >= array[i]]
_lowercase = longest_subsequence(A_ )
if len(A_ ) > len(A_ ):
_lowercase = temp_array
else:
i += 1
_lowercase = [element for element in array[1:] if element >= pivot]
_lowercase = [pivot, *longest_subsequence(A_ )]
if len(A_ ) > len(A_ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
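# Worked example: for [10, 22, 9, 33, 21, 50, 41, 60, 80] the recursion above
# returns a longest non-decreasing subsequence, e.g. [10, 22, 33, 41, 60, 80]
# (length 6), found by trying each element in turn as the pivot.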
| 497 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCAmelCase_ : Union[str, Any] = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCAmelCase_ : int = concatenate_datasets
lowerCAmelCase_ : List[Any] = DownloadConfig
lowerCAmelCase_ : Union[str, Any] = DownloadManager
lowerCAmelCase_ : Union[str, Any] = DownloadMode
lowerCAmelCase_ : List[str] = DownloadConfig
lowerCAmelCase_ : List[Any] = DownloadMode
lowerCAmelCase_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 716 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : str , lowercase : int , lowercase : Any , lowercase : Any ) -> Any:
# Initialise PyTorch model
_a = FunnelConfig.from_json_file(lowercase )
print(F'Building PyTorch model from configuration: {config}' )
_a = FunnelBaseModel(lowercase ) if base_model else FunnelModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowercase , lowercase , lowercase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
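# Example invocation (the script name and paths below are placeholders):
#   python convert_funnel_tf_checkpoint.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin \
#       --base_model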
| 521 | 0 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
lowerCamelCase_ = quote(_lowerCAmelCase )
return hfh.hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" , revision=_lowerCAmelCase )
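# Hedged usage sketch: the wrapper above builds a dataset resolve-URL, so for
# a hypothetical repo, hf_hub_url("user/dataset", "train.csv", revision="main")
# points at
# https://huggingface.co/datasets/user/dataset/resolve/main/train.csv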
| 675 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase : Any = HUGGINGFACE_HUB_CACHE
lowercase : Any = "config.json"
lowercase : Any = "diffusion_pytorch_model.bin"
lowercase : Optional[Any] = "diffusion_flax_model.msgpack"
lowercase : Optional[Any] = "model.onnx"
lowercase : List[str] = "diffusion_pytorch_model.safetensors"
lowercase : Any = "weights.pb"
lowercase : Tuple = "https://huggingface.co"
lowercase : int = default_cache_path
lowercase : List[str] = "diffusers_modules"
lowercase : Tuple = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
lowercase : Tuple = ["fp16", "non-ema"]
lowercase : str = ".self_attn" | 327 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCAmelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'ernie_m'
SCREAMING_SNAKE_CASE_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , SCREAMING_SNAKE_CASE__ = 25_00_02 , SCREAMING_SNAKE_CASE__ = 7_68 , SCREAMING_SNAKE_CASE__ = 12 , SCREAMING_SNAKE_CASE__ = 12 , SCREAMING_SNAKE_CASE__ = 30_72 , SCREAMING_SNAKE_CASE__ = "gelu" , SCREAMING_SNAKE_CASE__ = 0.1 , SCREAMING_SNAKE_CASE__ = 0.1 , SCREAMING_SNAKE_CASE__ = 5_14 , SCREAMING_SNAKE_CASE__ = 0.02 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 1e-05 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.0 , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = vocab_size
_snake_case : Dict = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Tuple = attention_probs_dropout_prob
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Optional[int] = initializer_range
_snake_case : str = layer_norm_eps
_snake_case : Tuple = classifier_dropout
_snake_case : Optional[int] = is_decoder
_snake_case : List[Any] = act_dropout
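# Usage sketch, assuming the public class name ErnieMConfig from transformers.
# The attribute_map above makes config.dropout an alias for classifier_dropout
# and config.num_classes an alias for num_labels:
#
#   config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
#   config.num_classes  # reads config.num_labels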
| 519 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase_ = list[list[float | int]]
def UpperCAmelCase ( A__ , A__ ) -> Matrix:
_snake_case : int = len(A__ )
_snake_case : Matrix = [[0 for _ in range(size + 1 )] for _ in range(A__ )]
_snake_case : int
_snake_case : int
_snake_case : int
_snake_case : int
_snake_case : int
_snake_case : float
for row in range(A__ ):
for col in range(A__ ):
_snake_case : Optional[int] = matrix[row][col]
_snake_case : Dict = vector[row][0]
_snake_case : str = 0
_snake_case : Any = 0
while row < size and col < size:
# pivoting
_snake_case : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(A__ , A__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_snake_case , _snake_case : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , A__ ):
_snake_case : Optional[int] = augmented[rowa][col] / augmented[row][col]
_snake_case : int = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , A__ ):
for row in range(A__ ):
_snake_case : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(A__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(A__ )
]
def UpperCAmelCase ( A__ ) -> Callable[[int], int]:
_snake_case : int = len(A__ )
_snake_case : Matrix = [[0 for _ in range(A__ )] for _ in range(A__ )]
_snake_case : Matrix = [[0] for _ in range(A__ )]
_snake_case : Matrix
_snake_case : int
_snake_case : int
_snake_case : int
for x_val, y_val in enumerate(A__ ):
for col in range(A__ ):
_snake_case : Any = (x_val + 1) ** (size - col - 1)
_snake_case : Dict = y_val
_snake_case : List[str] = solve(A__ , A__ )
def interpolated_func(A__ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(A__ ) )
return interpolated_func
def UpperCAmelCase ( A__ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase ( A__ = question_function , A__ = 10 ) -> int:
_snake_case : list[int] = [func(A__ ) for x_val in range(1 , order + 1 )]
_snake_case : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_snake_case : int = 0
_snake_case : Callable[[int], int]
_snake_case : int
for poly in polynomials:
_snake_case : int = 1
while func(A__ ) == poly(A__ ):
x_val += 1
ret += poly(A__ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 519 | 1 |
def compute_ap(l):  # noqa: E741
    """Print every articulation point of the undirected graph given as an
    adjacency list ``l`` (vertices whose removal disconnects the graph)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has
            # more than one outgoing tree edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
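# Added sanity check (not in the original file): for the adjacency list above,
# removing vertex 2, 3, or 5 disconnects the graph, so compute_ap(data) is
# expected to print:
#   2
#   3
#   5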
| 18 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 27 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom jieba pre-tokenizer cannot be pickled; swap in a plain
        # BertPreTokenizer before serializing
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
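# Added note (not in the original file): __getstate__/__setstate__ above exist
# because the custom jieba pre-tokenizer is not picklable, so it is swapped for
# a plain BertPreTokenizer before pickling and rebuilt from the vocab on load.
# Illustrative round-trip (checkpoint name taken from the map above):
#   import pickle
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tok2 = pickle.loads(pickle.dumps(tok))  # jieba pre-tokenizer restored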
| 186 |
def is_automorphic_number(number: int) -> bool:
    """
    Check whether ``number`` is automorphic, i.e. its square ends in the
    number itself.

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
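# Added illustrative checks (not in the original file):
#   is_automorphic_number(25)  -> True   (25**2 == 625, which ends in 25)
#   is_automorphic_number(76)  -> True   (76**2 == 5776)
#   is_automorphic_number(7)   -> False  (7**2 == 49)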
| 186 | 1 |
lowercase__ : Union[str, Any] = "Alexander Joslin"
import operator as op
from .stack import Stack
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
snake_case_ = Stack()
snake_case_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_A ) )
elif i in operators:
# RULE 2
operator_stack.push(_A )
elif i == ")":
# RULE 4
snake_case_ = operator_stack.peek()
operator_stack.pop()
snake_case_ = operand_stack.peek()
operand_stack.pop()
snake_case_ = operand_stack.peek()
operand_stack.pop()
snake_case_ = operators[opr](_A , _A )
operand_stack.push(_A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowercase__ : Tuple = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
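# Added note (not in the original file): operands are read one character at a
# time via str.isdigit(), so only single-digit operands are supported; parsing
# multi-digit numbers would require a tokenizing pass in front of the loop.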
| 376 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON configuration
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
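# Added illustrative invocation (script name and paths are placeholders):
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin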
| 376 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def lowercase__ ( self : str ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_input_output_texts(lowerCamelCase )
lowercase__ = tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.decode(lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''mecab''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MecabTokenizer(do_lower_case=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(
do_lower_case=lowerCamelCase, normalize_text=lowerCamelCase, mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = MecabTokenizer(normalize_text=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''], )
@require_sudachi
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_sudachi
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人''', '''参政権'''] )
@require_sudachi
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人参政権'''] )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(do_lower_case=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(normalize_text=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(trim_whitespace=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(normalize_text=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(trim_whitespace=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ), ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''], )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ), ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase__ = tokenizer.subword_tokenizer
lowercase__ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase, ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase__ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase, ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : str, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='''character''', **lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='''character''' )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase, ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = CharacterTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
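# Added usage sketch (not in the original file; `model` is a stand-in score
# network and `sample` a noise tensor of the right shape):
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t), sample).prev_sample
#       sample = scheduler.step_pred(model(sample, t), t, sample).prev_sample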
| 671 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
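# Added note (not in the original file): the three simulation modes exercised
# above differ in mechanism — CONNECTION_TIMES_OUT patches requests so calls
# that would hang raise RequestWouldHangIndefinitelyError instead,
# CONNECTION_FAILS makes them raise ConnectionError immediately, and
# HF_DATASETS_OFFLINE_SET_TO_1 flips the library's offline config flag.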
| 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
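# Added note (not in the original file): with this _LazyModule pattern the
# package import stays cheap; tokenization_wav2vec2_phoneme is only imported
# the first time an attribute such as Wav2Vec2PhonemeCTCTokenizer is accessed.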
| 556 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 706 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_snake_case : List[Any] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_snake_case : Any = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_snake_case : str = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
# the long usage docstring above kept its extracted placeholder name; alias it
# to the name the Metric class below expects
_KWARGS_DESCRIPTION = _snake_case


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="binary" ):
__snake_case : Union[str, Any] = simple_accuracy(__lowerCamelCase , __lowerCamelCase )
__snake_case : List[str] = float(fa_score(y_true=__lowerCamelCase , y_pred=__lowerCamelCase , average=__lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions: answers are
    grouped per question, macro-F1 is averaged over questions, and a question
    counts as an exact match only if every one of its answers is correct.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg"]'
            )
| 203 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor: shortest-edge resize (with an
    ImageNet-style center crop below 384 pixels), rescale and normalize.
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
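# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; the class name above is reconstructed
# from the ConvNeXT image processor in `transformers`). For the default
# crop_pct = 224/256 and size = {"shortest_edge": 224}, the shortest edge is
# first resized to int(224 / (224/256)) = 256 and the image is then
# center-cropped to 224 x 224 -- the standard ImageNet evaluation recipe.
#
#     from PIL import Image
#     import numpy as np
#
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)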
| 36 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """
    Use Pollard's rho algorithm to return a nontrivial factor of ``num``,
    or ``None`` if no factor was found within ``attempts`` retries.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky, or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
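# ---------------------------------------------------------------------------
# Example (added for illustration): with the default seed=2 and step=1 the
# pseudorandom function is f(x) = (x**2 + 1) % num, and for the classic
# textbook semiprime 8051 = 83 * 97 the algorithm finds a factor within a
# handful of iterations:
#
#     >>> pollard_rho(8051)
#     97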
| 499 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""
    Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image
    processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
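# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; assumes the `Salesforce/blip-image-captioning-base`
# checkpoint on the Hub, which pairs this processor with a BLIP captioning model):
#
#     from PIL import Image
#     import requests
#     from transformers import BlipProcessor
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(images=image, text="a photography of", return_tensors="pt")
#     # `inputs` now holds both `pixel_values` and the tokenized text prompt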
| 717 |
"""Convert DETA checkpoints (Swin backbone) from the original repository to the HuggingFace format."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img = Image.open(requests.get(url, stream=True).raw)
    return img
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA weights into our DETA structure.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print names and shapes of the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (the replacement targets below follow the upstream conversion script)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
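    # -----------------------------------------------------------------------
    # Typical invocation (added for illustration; the script file name is an
    # assumption):
    #
    #     python convert_deta_swin_to_pytorch.py \
    #         --model_name deta-swin-large \
    #         --pytorch_dump_folder_path ./deta-swin-large
    #
    # or directly from Python:
    #
    #     convert_deta_checkpoint("deta-swin-large", "./deta-swin-large", push_to_hub=False)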
| 666 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = False ,) -> Tuple:
super().__init__()
A = nn.Embedding(lowerCamelCase_ ,lowerCamelCase_ )
A = nn.Embedding(lowerCamelCase_ ,lowerCamelCase_ )
A = False
A = nn.Dropout(p=lowerCamelCase_ )
A = TaConfig(
vocab_size=lowerCamelCase_ ,d_model=lowerCamelCase_ ,num_heads=lowerCamelCase_ ,d_kv=lowerCamelCase_ ,d_ff=lowerCamelCase_ ,dropout_rate=lowerCamelCase_ ,feed_forward_proj=lowerCamelCase_ ,is_decoder=lowerCamelCase_ ,is_encoder_decoder=lowerCamelCase_ ,)
A = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
A = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
A = TaLayerNorm(lowerCamelCase_ )
A = nn.Dropout(p=lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[Any]:
A = self.token_embedder(lowerCamelCase_ )
A = encoder_input_tokens.shape[1]
A = torch.arange(lowerCamelCase_ ,device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
A = self.dropout_pre(lowerCamelCase_ )
# inverted the attention mask
A = encoder_input_tokens.size()
A = self.get_extended_attention_mask(lowerCamelCase_ ,lowerCamelCase_ )
for lyr in self.encoders:
A = lyr(lowerCamelCase_ ,lowerCamelCase_ )[0]
A = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
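# ---------------------------------------------------------------------------
# Minimal forward-pass sketch (added for illustration; the tiny hyperparameters
# below are arbitrary, not the values used by released Spectrogram Diffusion
# checkpoints):
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=16, vocab_size=100, d_model=32, dropout_rate=0.1,
#         num_layers=2, num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 100, (1, 16))
#     mask = torch.ones(1, 16, dtype=torch.long)
#     hidden, mask = encoder(tokens, mask)
#     print(hidden.shape)  # torch.Size([1, 16, 32])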
| 617 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
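# ---------------------------------------------------------------------------
# Note (added for illustration): the pattern above keeps `import transformers`
# cheap -- heavy submodules are imported only when an attribute is first
# accessed. A stripped-down sketch of the same idea:
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, name):
#             module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#             return getattr(module, name)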
| 617 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
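    # -----------------------------------------------------------------------
    # After conversion (added for illustration; "openmmlab/upernet-convnext-tiny"
    # is the Hub id this script pushes to), the checkpoint can be used directly:
    #
    #     from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation
    #
    #     processor = SegformerImageProcessor()
    #     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    #     inputs = processor(image, return_tensors="pt")
    #     logits = model(**inputs).logits  # shape (batch, 150 ADE20k classes, h, w)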
| 52 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
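# ---------------------------------------------------------------------------
# Note on the score convention above (added for illustration; my reading of the
# test): `loss.item()` is the mean per-token cross entropy, so scaling it back
# up by the target length gives the total sequence negative log-likelihood.
# A tiny numpy check of that identity:
#
#     import numpy as np
#     per_token_loss = np.array([1.2, 0.8, 1.0])
#     score = -(per_token_loss.size * per_token_loss.mean())  # -3.0
#     assert np.isclose(score, -per_token_loss.sum())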
| 52 | 1 |
"""
Maximum contiguous subarray via divide and conquer: split at the midpoint, and
the best subarray is either entirely in the left half, entirely in the right
half, or crosses the midpoint (CLRS, chapter 4). Runs in O(n log n).
"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """
    Find the maximum-sum contiguous subarray of ``arr[low..high]`` and return
    its bounds together with the sum.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """
    Find the maximum-sum subarray that crosses the midpoint: scan left from
    ``mid`` and right from ``mid + 1``, keeping the best running sums.
    """
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
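    # Quick sanity check on the classic CLRS chapter-4 example (added for
    # illustration): the best subarray is arr[7:11] with sum 18 + 20 - 7 + 12 = 43.
    arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    print(max_subarray(arr, 0, len(arr) - 1))  # (7, 10, 43)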
| 199 |
"""VAE building blocks (Encoder, Decoder, VectorQuantizer) for diffusers autoencoders."""
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of the decoding method: the decoded `sample` tensor."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , snake_case__ : int=3 , snake_case__ : str=3 , snake_case__ : Union[str, Any]=("UpDecoderBlock2D",) , snake_case__ : Dict=(64,) , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=32 , snake_case__ : str="silu" , snake_case__ : Any="group" , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : Any = layers_per_block
UpperCAmelCase__ : Any = nn.Convad(
snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = nn.ModuleList([] )
UpperCAmelCase__ : str = in_channels if norm_type == "spatial" else None
# mid
UpperCAmelCase__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , )
# up
UpperCAmelCase__ : Tuple = list(reversed(snake_case__ ) )
UpperCAmelCase__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(snake_case__ ):
UpperCAmelCase__ : Dict = output_channel
UpperCAmelCase__ : List[Any] = reversed_block_out_channels[i]
UpperCAmelCase__ : List[Any] = i == len(snake_case__ ) - 1
UpperCAmelCase__ : Tuple = get_up_block(
snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , )
self.up_blocks.append(snake_case__ )
UpperCAmelCase__ : str = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase__ : Optional[Any] = SpatialNorm(block_out_channels[0] , snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6 )
UpperCAmelCase__ : Dict = nn.SiLU()
UpperCAmelCase__ : Union[str, Any] = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 )
UpperCAmelCase__ : Union[str, Any] = False
    def forward( self , z , latent_embeds=None ):
        '''simple docstring'''
        sample = z
        sample = self.conv_in(sample )
        upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            if is_torch_version(">=" , "1.11.0" ):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
        else:
            # middle
            sample = self.mid_block(sample , latent_embeds )
            sample = sample.to(upscale_dtype )
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample , latent_embeds )
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample )
        else:
            sample = self.conv_norm_out(sample , latent_embeds )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
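# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The decoder forward above re-computes activations during backprop via
# torch.utils.checkpoint instead of storing them. The module and shapes below
# are assumptions chosen only to demonstrate the closure-wrapper pattern.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

_demo_block = nn.Sequential(nn.Conv2d(4 , 4 , 3 , padding=1 ) , nn.SiLU() )

def _demo_create_custom_forward(module ):
    def custom_forward(*inputs ):
        return module(*inputs )
    return custom_forward

_demo_x = torch.randn(1 , 4 , 8 , 8 , requires_grad=True )
_demo_y = checkpoint(_demo_create_custom_forward(_demo_block ) , _demo_x , use_reentrant=False )
_demo_y.sum().backward()  # activations inside _demo_block are recomputed here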
class VectorQuantizer( nn.Module ):
    '''simple docstring'''
    def __init__( self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True ):
        '''simple docstring'''
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                F"""Using {self.unknown_index} for unknown indices.""" )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used( self , inds ):
        '''simple docstring'''
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )
    def unmap_to_all( self , inds ):
        '''simple docstring'''
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
        return back.reshape(ishape )
    def forward( self , z ):
        '''simple docstring'''
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 , 2 , 3 , 1 ).contiguous()
        z_flattened = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry( self , indices , shape ):
        '''simple docstring'''
        if self.remap is not None:
            indices = indices.reshape(shape[0] , -1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices )
        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
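# --- Illustrative sketch (added for clarity; not part of the original file). ---
# Core of the vector-quantization step above: cdist finds the nearest codebook
# entry per latent vector, and z + (z_q - z).detach() is the straight-through
# estimator, so gradients flow back to z unchanged. Sizes below are assumptions.
import torch
import torch.nn as nn

_codebook = nn.Embedding(8 , 4 )               # n_e=8 codes of dimension vq_embed_dim=4
_z = torch.randn(5 , 4 , requires_grad=True )  # 5 flattened latent vectors
_indices = torch.argmin(torch.cdist(_z , _codebook.weight ) , dim=1 )
_z_q = _codebook(_indices )
_z_q = _z + (_z_q - _z).detach()               # quantized forward, identity backward
_z_q.sum().backward()                          # grad w.r.t. _z is all ones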
class DiagonalGaussianDistribution( object ):
    '''simple docstring'''
    def __init__( self , parameters , deterministic=False ):
        '''simple docstring'''
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample( self , generator: Optional[torch.Generator] = None ):
        '''simple docstring'''
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl( self , other=None ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def nll( self , sample , dims=[1, 2, 3] ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode( self ):
        '''simple docstring'''
        return self.mean
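# --- Illustrative sketch (added for clarity; not part of the original file). ---
# Sampling from the diagonal Gaussian above via the reparameterization trick,
# plus its KL term against a standard normal. The 8-channel parameter tensor is
# an assumption (first half mean, second half logvar, as in torch.chunk above).
import torch

_params = torch.randn(1 , 8 , 4 , 4 )
_mean , _logvar = torch.chunk(_params , 2 , dim=1 )
_logvar = torch.clamp(_logvar , -30.0 , 20.0 )
_std = torch.exp(0.5 * _logvar )
_sample = _mean + _std * torch.randn_like(_std )  # x = mu + sigma * eps
_kl = 0.5 * torch.sum(_mean.pow(2 ) + _logvar.exp() - 1.0 - _logvar , dim=[1, 2, 3] )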
| 199 | 1 |
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt( message ):
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt( message ):
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main():
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
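# Illustrative round trip using the functions above:
#   encrypt("SOS")         -> "... --- ..."
#   decrypt("... --- ...") -> "SOS"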
if __name__ == "__main__":
main()
| 148 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 2_0
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 1_0].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 1_00.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 1_5
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=2_0 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=2_0 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids_comp , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids_comp , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
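# --- Illustrative sketch (added for clarity; not part of the original tests). ---
# Chaining the warpers exercised above outside a test harness; vocab size,
# input ids, and hyperparameters are assumptions, and flax must be installed.
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

_scores = jnp.ones((1, 10) ) / 10            # uniform logits over a 10-token vocab
_input_ids = jnp.zeros((1, 5) , dtype="i4" )
_processor = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7 ), FlaxTopKLogitsWarper(3 )] )
_warped = _processor(_input_ids , _scores , cur_len=5 )  # same (1, 10) shape back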
| 148 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box( box ,width ,height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
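# Illustrative call (assumed values): for a 500x1000 (width x height) page,
# normalize_box([50, 100, 250, 300], 500, 1000) scales every coordinate into
# the 0-1000 range the model expects: [100, 100, 500, 300].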
def apply_tesseract( image ,lang ,tesseract_config ):
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image ,lang=lang ,output_type='dict' ,config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left ,top ,width ,height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box ,image_width ,image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample=None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 110 |
import os
def solution() -> int:
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
    print(solution())
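# Illustrative check (assumed input, Project Euler 18's small triangle):
#   3 / 7 4 / 2 4 6 / 8 5 9 3  ->  maximum top-to-bottom path sum 3+7+4+9 = 23.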
| 55 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
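# --- Illustrative sketch (added for clarity; not part of the original script). ---
# The attribute walk used by set_recursively above: a dotted key is resolved one
# getattr at a time before the final tensor is assigned. The tiny module below
# is an assumption for demonstration.
import torch
import torch.nn as nn

_demo_model = nn.Sequential(nn.Linear(2 , 2 ) )
_pointer = _demo_model
for _attribute in "0.bias".split("." ):
    _pointer = getattr(_pointer , _attribute )
_pointer.data = torch.zeros_like(_pointer.data )  # analogous to weight_type == "bias"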
| 25 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
    parser.add_argument(
        """--validation_file""" ,type=str ,default=None ,help="""A csv or a json file containing the validation data.""" )
    parser.add_argument(
        """--max_length""" ,type=int ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
    parser.add_argument(
        """--num_beams""" ,type=int ,default=None ,help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ) ,)
    parser.add_argument(
        """--model_name_or_path""" ,type=str ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=True ,)
    parser.add_argument(
        """--config_name""" ,type=str ,default=None ,help="""Pretrained config name or path if not the same as model_name""" ,)
    parser.add_argument(
        """--device""" ,type=str ,default="""cpu""" ,help="""Device where the model will be run""" ,)
    parser.add_argument("""--output_file_path""" ,type=str ,default=None ,help="""Where to store the final ONNX file.""" )
    args = parser.parse_args()
    return args
def load_model_tokenizer( model_name ,device="cpu" ):
    """simple docstring"""
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model( model ,tokenizer ,onnx_file_path ,num_beams ,max_length ):
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = """My friends are cool but they eat too many carbs."""
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1_024 ,return_tensors="""pt""" ).to(model.device )
        summary_ids = model.generate(
            inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=num_beams ,max_length=max_length ,early_stopping=True ,decoder_start_token_id=model.config.decoder_start_token_id ,)
        torch.onnx.export(
            bart_script_model ,(
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,onnx_file_path ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            } ,example_outputs=summary_ids ,)
        logger.info("""Model exported to {}""".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None ,{
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams ),
                """max_length""": np.array(max_length ),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
            } ,)
        np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1E-3 ,atol=1E-3 )
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
        logger.info("""Success.""" )
def main():
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path ,device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = """BART.onnx"""
    logger.info("""Exporting model to ONNX""" )
    export_and_validate_model(model ,tokenizer ,output_name ,num_beams ,max_length )
if __name__ == "__main__":
main()
| 398 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
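# --- Illustrative sketch (added for clarity; not part of the original file). ---
# The deferred-import idea behind _LazyModule, shown with a plain PEP 562
# module-level __getattr__: heavy submodules are imported only on first access.
# The mapping and names below are assumptions, not the transformers internals.
import importlib

_lazy_attr_to_module = {"EncodecConfig": ".configuration_encodec"}

def __getattr__(name ):
    if name in _lazy_attr_to_module:
        module = importlib.import_module(_lazy_attr_to_module[name] , __package__ )
        return getattr(module , name )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}" )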
| 640 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float] ,low: int ,high: int ) -> tuple[int | None, int | None, float]:
    """simple docstring"""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr ,low ,mid )
    right_low , right_high , right_sum = max_subarray(arr ,mid + 1 ,high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr ,low ,mid ,high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float] ,low: int ,mid: int ,high: int ) -> tuple[int, int, float]:
    """simple docstring"""
    left_sum , max_left = float("""-inf""" ), -1
    right_sum , max_right = float("""-inf""" ), -1
    summ: float = 0
    for i in range(mid ,low - 1 ,-1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 ,high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
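# Illustrative result (assumed input): max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
# returns (3, 6, 6): the maximum subarray [4, -1, 2, 1] spans indices 3..6 with sum 6.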
def time_max_subarray(input_size: int ) -> float:
    """simple docstring"""
    arr = [randint(1 ,input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr ,0 ,input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """simple docstring"""
    input_sizes = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("""No of Inputs\t\tTime Taken""" )
    for input_size, runtime in zip(input_sizes ,runtimes ):
        print(input_size ,"""\t\t""" ,runtime )
    plt.plot(input_sizes ,runtimes )
    plt.xlabel("""Number of Inputs""" )
    plt.ylabel("""Time taken in seconds""" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 706 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    """simple docstring"""
    height , width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread('''image_data/lena.jpg''', 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 569 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 70 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems( config , items ):
    '''simple docstring'''
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure( config ):
    '''simple docstring'''
    config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ):
    '''simple docstring'''
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="session" )
def disable_tqdm_output():
    '''simple docstring'''
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ):
    '''simple docstring'''
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ):
    '''simple docstring'''
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , True )
| 642 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'longformer'
    def __init__( self , attention_window = 5_12 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_05_22 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_12 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-1_2 , onnx_export = False , **kwargs , ) -> int:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    def __init__( self , config , task = "default" , patching_specs = None ) -> List[Any]:
        """simple docstring"""
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        outputs = super().outputs
        if self.task == "default":
            outputs['''pooler_output'''] = {0: '''batch'''}
        return outputs
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-4
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , preprocessor: 'PreTrainedTokenizerBase' , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
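        # e.g. for seq_length = 8 the line above turns each row of the mask into
        # [1, 0, 1, 0, 1, 0, 1, 0], marking every second token as a global-attention token.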
        return inputs | 638 | 0 | def __magic_name__( a , b ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
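# Worked trace of the shift-and-add loop above (a = 7, b = 9 chosen for illustration):
# b = 0b1001: low bit set   -> res = 7;  a = 14, b = 0b100
# b = 0b100:  low bit clear -> res = 7;  a = 28, b = 0b10
# b = 0b10:   low bit clear -> res = 7;  a = 56, b = 0b1
# b = 0b1:    low bit set   -> res = 63; so 7 * 9 == 63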
def __magic_name__( a , b , c ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 638 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""" , """""")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""")
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""" , """LayerNorm""")
    if "transformer" in orig_key:
        layer_num = orig_key.split(""".""")[0].split("""_""")[-1]
        orig_key = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}')
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""" , """attention.self""")
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""" , """attention""")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""" , """self.query""")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""" , """self.key""")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""" , """self.value""")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""" , """intermediate.dense""")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""" , """output.dense""")
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""" , """output.dense""")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""")
    if "cls" not in orig_key:
        orig_key = """yoso.""" + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
    orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""")["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
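    # Hypothetical invocation (the file paths below are placeholders, not from the source):
    # python convert_yoso_checkpoint.py \
    #     --pytorch_model_path ./yoso.ckpt \
    #     --config_file ./yoso_config.json \
    #     --pytorch_dump_path ./yoso-hf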
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func"""):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
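# Typical commands dispatched through this entry point (a usage sketch of the standard
# accelerate CLI; the script name train.py is illustrative):
#   accelerate config
#   accelerate launch train.py
#   accelerate env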
| 665 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key: str , default: bool=False ) -> bool:
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
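# Usage sketch: this mirrors how RUN_SLOW is parsed right below; "RUN_NIGHTLY" is a
# hypothetical flag name used only for illustration.
# _run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)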
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase: str ) -> List[Any]:
'''simple docstring'''
return unittest.skip("Test was skipped" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: List[str] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: List[Any] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: int ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: str ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: List[str] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_lowerCamelCase )
def require_torch_min_version( test_case=None , version=None ) -> List[Any]:
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version(">=" , version ) , F"""test requires torch version >= {version}""" )(test_case )
def lowercase_ ( _lowerCamelCase: List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: List[str] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_lowerCamelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase )
class _snake_case ( unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class _snake_case ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class _snake_case ( unittest.TestCase ):
    def add_mocks( self , mocks: Union[mock.Mock, List[mock.Mock]] ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors( tensor ) -> bool:
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException( Exception ):
pass
def run_command( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , "decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e | 366 | """simple docstring"""
import math
def is_prime( number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth: int = 10001 ) -> int:
    '''simple docstring'''
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
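# Worked check of the 6k +/- 1 trial division above (illustrative): for number = 97,
# int(sqrt(97)) + 1 = 10, so the loop only tests i = 5; 97 % 5 != 0 and 97 % 7 != 0,
# hence is_prime(97) is True, while is_prime(49) fails at 49 % 7 == 0.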
if __name__ == "__main__":
print(F"""{solution() = }""") | 366 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    """simple docstring"""
    def __init__( self , value ):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    """simple docstring"""
    def __init__( self , tree ):
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node ):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ):
        """simple docstring"""
        yield self.depth_first_search(self.tree )
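# Usage sketch (node values are illustrative):
# root = Node(10); root.left = Node(5); root.right = Node(-3)
# print(next(iter(BinaryTreeNodeSum(root))))  # 10 + 5 + (-3) = 12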
if __name__ == "__main__":
import doctest
doctest.testmod()
| 536 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_a: int = 1 , input_b: int = 1 , carry_in: int = 1 ):
    """simple docstring"""
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr ) # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
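# Expected behaviour sketch: with the definite inputs (1, 1, 1) used below, the two measured
# qubits encode sum = 1 and carry-out = 1, so essentially all 1_000 shots land on '11';
# passing a 2 puts that input in superposition and splits the counts across states.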
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 536 | 1 |
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number >= 0: # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
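# Worked examples (illustrative):
# logical_left_shift(10, 2)     -> '0b101000'  (0b1010 gains two trailing zeros)
# logical_right_shift(10, 2)    -> '0b10'      (the two low bits are discarded)
# arithmetic_right_shift(-8, 2) -> '0b11110'   (the sign bit '1' fills the vacated positions)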
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next = None
class CircularLinkedList:
    def __init__( self ) -> None:
        self.head = None
        self.tail = None
    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data: Any ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head( self , data: Any ) -> None:
        self.insert_nth(0 , data )
    def insert_nth( self , index: int , data: Any ) -> None:
        if index < 0 or index > len(self ):
            raise IndexError('list index out of range.' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node # first node points to itself
            self.tail = self.head = new_node
        elif index == 0: # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1: # insert at tail
                self.tail = new_node
    def delete_front( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index: int = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError('list index out of range.' )
        delete_node = self.head
        if self.head == self.tail: # just one node
            self.head = self.tail = None
        elif index == 0: # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1: # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ) -> bool:
        return len(self ) == 0
def a_ ( ) -> None:
    """simple docstring"""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 372 | 0 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 5_1_2,
}
class __UpperCAmelCase ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
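# Usage sketch (assuming the fast tokenizer above is registered as
# BlenderbotSmallTokenizerFast in transformers; the checkpoint is the one named above):
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# tokenizer("sam is a great name")["input_ids"]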
| 255 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
_A :Optional[Any] = ['''image_processor''', '''tokenizer''']
_A :List[str] = '''ViTImageProcessor'''
_A :Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : str , snake_case__ : List[str]=None , snake_case__ : str=None , **snake_case__ : Union[str, Any] ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case__ , )
lowercase = kwargs.pop("""feature_extractor""" )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case__ , snake_case__ )
def __call__( self : str , snake_case__ : Tuple=None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None , snake_case__ : List[str]=None , **snake_case__ : Optional[Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
lowercase = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if visual_prompt is not None:
lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if visual_prompt is not None and images is not None:
lowercase = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *snake_case__ : int , **snake_case__ : Optional[int] ):
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ):
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , snake_case__ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , snake_case__ , )
return self.image_processor
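# Usage sketch (assuming this is the CLIPSeg processor, pairing the ViT image processor and
# CLIP tokenizer declared above; `image` stands for any PIL image):
# from transformers import CLIPSegProcessor
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=image, return_tensors="pt")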
| 428 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
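    # With the lazy-module pattern above, importing the package stays cheap: the torch-backed
    # classes listed in _import_structure are only materialized when an attribute such as
    # PoolFormerModel is first accessed (a sketch of the intent, per the transformers convention).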
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( vector ) -> np.array:
    return (2 / (1 + np.exp(-2 * vector ))) - 1
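# Sanity check (illustrative): the expression above is algebraically identical to np.tanh,
# e.g. np.allclose((2 / (1 + np.exp(-2 * v))) - 1, np.tanh(v)) holds for v = np.linspace(-3, 3, 7).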
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
def combination_util( arr , n , r , index , data , i ):
    """simple docstring"""
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements remain to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ):
    """simple docstring"""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
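# For the driver below (arr = [10, 20, 30, 40, 50], r = 3) the recursion prints all
# C(5, 3) = 10 combinations in order, starting with "10 20 30".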
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 333 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self : Optional[int] ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] ,dtype=tf.int32 ,) # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8) )
        self.assertEqual(output.shape ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.float32 ,)
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 333 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
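# e.g. if a batch is padded to length 8 but only the first 5 columns contain any non-pad
# token, trim_batch drops the 3 all-pad columns from input_ids (and attention_mask if given).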
class SCREAMING_SNAKE_CASE_ ( Dataset ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="train" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="" , ):
super().__init__()
__SCREAMING_SNAKE_CASE = Path(lowerCAmelCase__).joinpath(type_path + """.source""")
__SCREAMING_SNAKE_CASE = Path(lowerCAmelCase__).joinpath(type_path + """.target""")
__SCREAMING_SNAKE_CASE = self.get_char_lens(self.src_file)
__SCREAMING_SNAKE_CASE = max_source_length
__SCREAMING_SNAKE_CASE = max_target_length
assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = prefix
if n_obs is not None:
__SCREAMING_SNAKE_CASE = self.src_lens[:n_obs]
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = tgt_lang
def __len__( self):
return len(self.src_lens)
def __getitem__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = index + 1 # linecache starts at 1
__SCREAMING_SNAKE_CASE = self.prefix + linecache.getline(str(self.src_file) , lowerCAmelCase__).rstrip("""\n""")
__SCREAMING_SNAKE_CASE = linecache.getline(str(self.tgt_file) , lowerCAmelCase__).rstrip("""\n""")
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__) else self.tokenizer
)
__SCREAMING_SNAKE_CASE = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__) else self.tokenizer
__SCREAMING_SNAKE_CASE = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , """right""")
__SCREAMING_SNAKE_CASE = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , """right""")
__SCREAMING_SNAKE_CASE = source_inputs["""input_ids"""].squeeze()
__SCREAMING_SNAKE_CASE = target_inputs["""input_ids"""].squeeze()
__SCREAMING_SNAKE_CASE = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( lowerCAmelCase__):
return [len(lowerCAmelCase__) for x in Path(lowerCAmelCase__).open().readlines()]
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.stack([x["""input_ids"""] for x in batch])
__SCREAMING_SNAKE_CASE = torch.stack([x["""attention_mask"""] for x in batch])
__SCREAMING_SNAKE_CASE = torch.stack([x["""decoder_input_ids"""] for x in batch])
__SCREAMING_SNAKE_CASE = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__)
else self.tokenizer.pad_token_id
)
__SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__)
else self.tokenizer.pad_token_id
)
__SCREAMING_SNAKE_CASE = trim_batch(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def flatten_list( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
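# Worked example for f1_score above (illustrative): prediction "the cat sat" normalizes to
# ["cat", "sat"] and ground truth "cat sat down" to ["cat", "sat", "down"]; num_same = 2,
# so precision = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8, while
# exact_match_score on the same pair is False.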
def is_rag_model( model_prefix ):
    return model_prefix.startswith("""rag""" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 248 |
"""simple docstring"""
def exchange_sort( numbers ):
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
    return numbers
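# e.g. exchange_sort([3, 1, 4, 1, 5]) returns [1, 1, 3, 4, 5]; like selection sort it makes
# O(n^2) comparisons, but swaps eagerly whenever an out-of-order pair is found.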
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 248 | 1 |