| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: find the least row length n for which the fill-count
    function F(min_block_length, n) first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
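# Added sanity check (not part of the original row; the function name is illustrative):
# a brute-force count of length-n rows whose maximal runs of red cells are all at least
# m long. It matches fill_count_functions for small inputs, e.g. F(3, 7) == 17, the
# worked example from Project Euler 114.
from itertools import product


def brute_force_fill_count(m: int, n: int) -> int:
    total = 0
    for cells in product((0, 1), repeat=n):  # 1 = red cell, 0 = black cell
        runs, run = [], 0
        for cell in cells:
            if cell:
                run += 1
            elif run:
                runs.append(run)
                run = 0
        if run:
            runs.append(run)
        total += all(r >= m for r in runs)  # the all-black row also counts
    return total


assert brute_force_fill_count(3, 7) == 17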
| 40 |
"""Format a benchmark-results JSON report into a collapsible markdown table."""
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
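# Added usage sketch (illustrative; the file names and sample metrics are examples,
# and it assumes this script is saved as format.py):
#
#   echo '{"benchmarks/bench_array.py": {"read": {"new": 0.12, "old": 0.15, "diff": -0.03}}}' > results.json
#   python format.py results.json report.md
#   # report.md now holds a "### Benchmark: bench_array.py" section whose table cell
#   # reads "0.120000 / 0.150000 (-0.030000)".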
| 71 | 0 |
"""Zero-shot object detection pipeline: detect objects described by free-text queries."""
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # dtype int64 assumed here; the original dump only shows a garbled integer dtype
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
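# Added usage sketch (standard transformers entry point; the checkpoint name and
# image URL are examples, not taken from this row):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]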
| 41 |
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = UpperCAmelCase_  # the encoding dict constructed above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 71 | 0 |
"""`transformers-cli add-new-model` command (deprecated in favour of `add-new-model-like`)."""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 42 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-based position of the highest set bit of `number`.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
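# Added cross-check (illustrative, not part of the original row): for non-negative
# integers this is exactly Python's built-in int.bit_length().
assert all(get_highest_set_bit_position(n) == n.bit_length() for n in range(1025))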
| 71 | 0 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights, float('inf') where no edge exists
    :param v: number of vertices
    :return: the shortest-path distance matrix and the vertex count
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and (src, dst, weight) inputs above:
# 0 INF INF
# INF 0 2
# INF 1 0
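# Added non-interactive sketch (names reused from the functions above; values mirror
# the commented session, so nothing changes after relaxation):
#
#   INF = float("inf")
#   example_graph = [
#       [0.0, INF, INF],
#       [INF, 0.0, 2.0],
#       [INF, 1.0, 0.0],
#   ]
#   floyd_warshall(example_graph, 3)
#   # prints:
#   # 0    INF  INF
#   # INF  0    2
#   # INF  1    0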
| 43 |
"""Compute n choose k: the number of ways to pick k items from a set of n."""
from math import factorial


def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
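# Added cross-check (illustrative, requires Python 3.8+): math.comb computes the
# same binomial coefficient.
from math import comb

assert combinations(52, 5) == comb(52, 5) == 2_598_960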
| 71 | 0 |
"""Convert an original ControlNet checkpoint into the diffusers format."""
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
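    # Added example invocation (paths and the script name are placeholders for
    # wherever this file is saved):
    #
    #   python convert_controlnet_checkpoint.py \
    #       --checkpoint_path control_sd15_canny.pth \
    #       --original_config_file cldm_v15.yaml \
    #       --image_size 512 \
    #       --dump_path ./controlnet-canny \
    #       --to_safetensors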
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 44 |
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_guided_v2v(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 71 | 0 |
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(n))


def solution() -> int:
    """Project Euler 34: sum of all numbers that equal the sum of the factorials
    of their digits (1 and 2 are excluded by starting the range at 3)."""
    limit = 7 * factorial(9) + 1  # beyond this, the digit-factorial sum cannot keep up
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
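# Known result (Project Euler 34): the only qualifying numbers are 145 and 40585,
# so solution() returns 40730.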
if __name__ == "__main__":
    print(f"{solution() = }")
| 45 |
"""Integration tests for the datasets inspection utilities."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 71 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 46 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single fake PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
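# Added usage sketch (standard CLIPProcessor API; the checkpoint name is an example,
# not taken from this row):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#   # inputs holds input_ids, attention_mask and pixel_values, ready for CLIPModel.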
| 71 | 0 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Extended Euclidean algorithm: return x with (a * x) % m == 1."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
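# Added worked check (illustrative): 7 * 103 = 721 = 3 * 240 + 1, so the modular
# inverse of 7 modulo 240 is 103.
assert find_mod_inverse(7, 240) == 103
assert (7 * find_mod_inverse(7, 240)) % 240 == 1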
| 47 |
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 0 |
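The AudioLDM tests above construct their random generator differently depending on the device. A small helper that captures that branch (the helper name is mine; the MPS case mirrors the test's fallback to the global CPU generator):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS devices fall back to seeding the global CPU generator,
    # which torch.manual_seed returns; other devices get a local one.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # reproducible across runs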
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , *__magic_name__ : Any , **__magic_name__ : Dict ):
"""simple docstring"""
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 | 0 |
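The init module above defers heavy imports through _LazyModule. A self-contained sketch of the underlying idea, not the transformers implementation itself (class and attribute names are mine):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve exported symbols on first access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value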
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :int ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(snake_case_ , snake_case_ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__UpperCAmelCase = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__UpperCAmelCase = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
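A quick numeric check of the amortization formula above, with illustrative loan values:

principal, annual_rate, years = 25_000.0, 0.12, 3
r = annual_rate / 12                 # monthly rate, as in the function above
n = years * 12                       # number of monthly payments
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(emi, 2))                 # ~830.36 per month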
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
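The greedy routine above (called greedy_min_vertex_cover in its __main__ block) returns a set of vertices. A small checker to confirm the result really covers every listed edge (the checker's name is mine):

def is_vertex_cover(graph: dict, cover: set) -> bool:
    # Every edge (u, v) must have at least one endpoint in the cover.
    return all(u in cover or v in cover for u, nbrs in graph.items() for v in nbrs)

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(is_vertex_cover(graph, {0, 2, 3}))  # True
print(is_vertex_cover(graph, {0, 3}))     # False: edge 2-4 is uncovered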
'''simple docstring'''
def A__ ( __lowerCAmelCase : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 |
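The bit trick above works because a power of two has exactly one set bit, so n & (n - 1) clears it and leaves zero. Note that, as written, the snippet treats 0 as a power of two. A quick check:

def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0

print([n for n in range(17) if is_power_of_two(n)])  # [0, 1, 2, 4, 8, 16]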
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 0 |
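The make_batched helper at the top of the video processor above normalizes three input shapes: a single frame, one video (a list of frames), and a batch of videos. A stand-alone restatement, using numpy arrays in place of the full is_valid_image check:

import numpy as np

def make_batched(videos):
    def is_image(x):
        return isinstance(x, np.ndarray)  # stand-in for is_valid_image

    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_image(videos[0][0]):
        return videos        # already a batch of videos
    if isinstance(videos, (list, tuple)) and is_image(videos[0]):
        return [videos]      # one video: wrap once
    if is_image(videos):
        return [[videos]]    # one frame: wrap twice
    raise ValueError(f"Could not make batched video from {videos}")

frame = np.zeros((16, 16, 3), dtype=np.uint8)
batched = make_batched(frame)
print(len(batched), len(batched[0]))  # 1 1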
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Tuple = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="dpt"
def __init__( self : Optional[int] , a__ : int=768 , a__ : List[Any]=12 , a__ : Union[str, Any]=12 , a__ : List[str]=3072 , a__ : Optional[int]="gelu" , a__ : List[Any]=0.0 , a__ : List[str]=0.0 , a__ : Dict=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Any=384 , a__ : List[str]=16 , a__ : Tuple=3 , a__ : int=False , a__ : List[Any]=True , a__ : Any=[2, 5, 8, 11] , a__ : Any="project" , a__ : Any=[4, 2, 1, 0.5] , a__ : Union[str, Any]=[96, 192, 384, 768] , a__ : Dict=256 , a__ : Dict=-1 , a__ : str=False , a__ : Union[str, Any]=True , a__ : Optional[Any]=0.4 , a__ : Union[str, Any]=255 , a__ : int=0.1 , a__ : str=[1, 1024, 24, 24] , a__ : str=[0, 1] , a__ : Union[str, Any]=None , **a__ : str , ):
super().__init__(**a__ )
UpperCAmelCase = hidden_size
UpperCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
UpperCAmelCase = BitConfig(**a__ )
elif isinstance(a__ , a__ ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
UpperCAmelCase = BitConfig(**a__ )
elif isinstance(a__ , a__ ):
UpperCAmelCase = backbone_config
else:
raise ValueError(
f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
UpperCAmelCase = backbone_featmap_shape
UpperCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = []
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
UpperCAmelCase = readout_type
UpperCAmelCase = reassemble_factors
UpperCAmelCase = neck_hidden_sizes
UpperCAmelCase = fusion_hidden_size
UpperCAmelCase = head_in_index
UpperCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase = use_auxiliary_head
UpperCAmelCase = auxiliary_loss_weight
UpperCAmelCase = semantic_loss_ignore_index
UpperCAmelCase = semantic_classifier_dropout
def __snake_case ( self : List[str] ):
UpperCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase = self.backbone_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
| 51 |
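The to_dict method at the end of the DPT config above flattens a nested backbone config before serialization. The same pattern in isolation (TinyConfig is an illustrative name, not a real class):

import copy

class TinyConfig:
    model_type = "tiny"

    def __init__(self, hidden_size=768, backbone_config=None):
        self.hidden_size = hidden_size
        self.backbone_config = backbone_config

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            # Nested configs serialize recursively into plain dicts.
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output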
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
| 71 | 0 |
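The JSON writer above serializes the dataset batch by batch into JSON Lines, appending a trailing newline when pandas omits one. A minimal stand-alone version of that inner loop (assuming pandas, as the snippet does; the function name is mine):

import pandas as pd

def to_json_lines(rows: list, batch_size: int = 2) -> str:
    out = []
    for offset in range(0, len(rows), batch_size):
        batch = pd.DataFrame(rows[offset : offset + batch_size])
        json_str = batch.to_json(orient="records", lines=True)
        if not json_str.endswith("\n"):
            json_str += "\n"
        out.append(json_str)
    return "".join(out)

print(to_json_lines([{"a": 1}, {"a": 2}, {"a": 3}]))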
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = CLIPConfig
__lowerCAmelCase = ['''CLIPEncoderLayer''']
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__a : Optional[Any] = CLIPVisionModelWithProjection(config.vision_config )
__a : Union[str, Any] = nn.Linear(config.vision_config.projection_dim , 1 )
__a : List[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.5 , _UpperCAmelCase=0.5 ):
__a : int = self.vision_model(_UpperCAmelCase )[0]
__a : str = self.p_head(_UpperCAmelCase )
__a : Tuple = nsfw_detected.flatten()
__a : str = nsfw_detected > p_threshold
__a : Dict = nsfw_detected.tolist()
if any(_UpperCAmelCase ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(_UpperCAmelCase ):
if nsfw_detected_:
__a : str = np.zeros(images[idx].shape )
__a : str = self.w_head(_UpperCAmelCase )
__a : int = watermark_detected.flatten()
__a : Any = watermark_detected > w_threshold
__a : Optional[int] = watermark_detected.tolist()
if any(_UpperCAmelCase ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(_UpperCAmelCase ):
if watermark_detected_:
__a : Union[str, Any] = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
| 52 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 0 |
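The dummy classes above exist so that importing a "speech" class without its backend installed fails with a clear error at construction time rather than an opaque failure at import time. A simplified version of the pattern (in transformers, requires_backends first checks whether the backend is importable; this sketch always fails, to illustrate the missing-backend path):

def requires_backends(obj, backends):
    missing = ", ".join(backends)
    raise ImportError(f"{type(obj).__name__} requires the following backends: {missing}")

class SpeechDummy:
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])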
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Dict = logging.get_logger()
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : str, lowerCAmelCase_ : LevitConfig, lowerCAmelCase_ : Path, lowerCAmelCase_ : bool = True ):
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__lowerCAmelCase = timm.create_model('levit_128s', pretrained=lowerCAmelCase_ )
else:
__lowerCAmelCase = timm.create_model('levit_128', pretrained=lowerCAmelCase_ )
if hidden_sizes == 192:
__lowerCAmelCase = timm.create_model('levit_192', pretrained=lowerCAmelCase_ )
if hidden_sizes == 256:
__lowerCAmelCase = timm.create_model('levit_256', pretrained=lowerCAmelCase_ )
if hidden_sizes == 384:
__lowerCAmelCase = timm.create_model('levit_384', pretrained=lowerCAmelCase_ )
from_model.eval()
__lowerCAmelCase = LevitForImageClassificationWithTeacher(lowerCAmelCase_ ).eval()
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = from_model.state_dict()
__lowerCAmelCase = list(from_model.state_dict().keys() )
__lowerCAmelCase = list(our_model.state_dict().keys() )
print(len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) )
for i in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase = weights[og_keys[i]]
our_model.load_state_dict(lowerCAmelCase_ )
__lowerCAmelCase = torch.randn((2, 3, 224, 224) )
__lowerCAmelCase = from_model(lowerCAmelCase_ )
__lowerCAmelCase = our_model(lowerCAmelCase_ ).logits
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one."
__lowerCAmelCase = name
print(lowerCAmelCase_ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__lowerCAmelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
def a_ ( lowerCAmelCase_ : Path, lowerCAmelCase_ : str = None, lowerCAmelCase_ : bool = True ):
__lowerCAmelCase = 'imagenet-1k-id2label.json'
__lowerCAmelCase = 1000
__lowerCAmelCase = (1, num_labels)
__lowerCAmelCase = 'huggingface/label-files'
__lowerCAmelCase = num_labels
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ )
__lowerCAmelCase = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
__lowerCAmelCase = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name], lowerCAmelCase_, names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
return config, expected_shape
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_snake_case : List[Any] = parser.parse_args()
_snake_case : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 53 |
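The LeViT conversion above copies weights purely by position: it zips the ordered keys of the timm state dict with the ordered keys of the Hugging Face model. The core move, extracted (the function name is mine; it assumes both architectures enumerate parameters in the same order with matching shapes):

import torch

def copy_by_position(src: torch.nn.Module, dst: torch.nn.Module) -> None:
    src_sd = src.state_dict()
    src_keys = list(src_sd.keys())
    dst_keys = list(dst.state_dict().keys())
    assert len(src_keys) == len(dst_keys), "architectures must line up 1:1"
    # Map the i-th source tensor onto the i-th destination key.
    dst.load_state_dict({dk: src_sd[sk] for sk, dk in zip(src_keys, dst_keys)})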
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]:
"""simple docstring"""
if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : Any = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCAmelCase_ : Optional[int] = determinant_x / determinant
UpperCAmelCase_ : List[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 71 | 0 |
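A worked instance of Cramer's rule as implemented above, for the system 2x + 3y = 8 and 4x + y = 6 (values chosen for a clean answer):

a1, b1, c1 = 2, 3, 8
a2, b2, c2 = 4, 1, 6
det = a1 * b2 - a2 * b1          # -10, nonzero so a unique solution exists
det_x = c1 * b2 - c2 * b1        # 8 - 18 = -10
det_y = a1 * c2 - a2 * c1        # 12 - 32 = -20
print(det_x / det, det_y / det)  # 1.0 2.0, i.e. x = 1, y = 2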
import math
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if (
not isinstance(lowercase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if (
not isinstance(lowercase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
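A quick sanity check of the two relations above: for apparent power S and power factor pf, real power P = S * pf and reactive power Q = S * sqrt(1 - pf**2), so (P, Q, S) form a right triangle:

import math

S, pf = 100.0, 0.8
P = S * pf                    # 80.0 (real power)
Q = S * math.sqrt(1 - pf**2)  # 60.0 (reactive power, a 3-4-5 triangle)
assert math.isclose(math.hypot(P, Q), S)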
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
| 71 | 0 |
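A worked example of both rescalings above on a small sample; note that statistics.stdev is the sample standard deviation:

from statistics import mean, stdev

data = [2.0, 4.0, 6.0, 8.0]
x_min, x_max = min(data), max(data)
print([round((x - x_min) / (x_max - x_min), 3) for x in data])  # [0.0, 0.333, 0.667, 1.0]
mu, sigma = mean(data), stdev(data)                             # 5.0 and ~2.582
print([round((x - mu) / sigma, 3) for x in data])               # [-1.162, -0.387, 0.387, 1.162]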
from __future__ import annotations
from typing import Any
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str ,A : int = 6 ):
__A = None
__A = None
self.create_linked_list(A )
def UpperCamelCase_ ( self : Optional[Any] ,A : int ):
__A = Node()
__A = current_node
__A = current_node
__A = current_node
for _ in range(1 ,A ):
__A = Node()
__A = current_node
__A = previous_node
__A = current_node
__A = self.front
__A = previous_node
def UpperCamelCase_ ( self : Optional[int] ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def UpperCamelCase_ ( self : Optional[int] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def UpperCamelCase_ ( self : Any ,A : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
__A = self.rear.next
if self.rear:
__A = data
def UpperCamelCase_ ( self : List[Any] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
__A = self.front.data
__A = None
return data
__A = self.front
__A = old_front.next
__A = old_front.data
__A = None
return data
def UpperCamelCase_ ( self : str ):
if self.is_empty():
raise Exception("Empty Queue" )
def UpperCamelCase_ ( self : List[str] ):
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any ):
__A = None
__A = None
__A = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 |
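The class above is a fixed-capacity queue over a circular linked list. The same interface is often easier to follow as an array-backed ring buffer; a compact sketch with the same Full/Empty errors (all names are mine):

class RingBuffer:
    def __init__(self, capacity: int = 6):
        self._buf = [None] * capacity
        self._head = 0   # index of the oldest element
        self._size = 0

    def is_empty(self) -> bool:
        return self._size == 0

    def enqueue(self, item) -> None:
        if self._size == len(self._buf):
            raise Exception("Full Queue")
        self._buf[(self._head + self._size) % len(self._buf)] = item
        self._size += 1

    def dequeue(self):
        if self.is_empty():
            raise Exception("Empty Queue")
        item, self._buf[self._head] = self._buf[self._head], None
        self._head = (self._head + 1) % len(self._buf)
        self._size -= 1
        return item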
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 71 | 0 |
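The training script above wraps its whole loop in find_executable_batch_size, which retries with a smaller batch whenever CUDA runs out of memory. This is only a rough sketch of that retry behaviour; the real decorator in accelerate.utils also frees CUDA caches and recognizes several OOM exception types:

def find_executable_batch_size(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size)
                except RuntimeError as e:
                    if "out of memory" not in str(e).lower():
                        raise
                    batch_size //= 2  # shrink and retry
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator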
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : List[Any] = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "open-llama"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict=10_0000 , SCREAMING_SNAKE_CASE_ : int=4096 , SCREAMING_SNAKE_CASE_ : str=1_1008 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Dict=32 , SCREAMING_SNAKE_CASE_ : Optional[int]="silu" , SCREAMING_SNAKE_CASE_ : Dict=2048 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE_ : int=1e-6 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Any , ) -> Optional[Any]:
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = intermediate_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = rms_norm_eps
__snake_case = use_cache
__snake_case = kwargs.pop(
'use_memorry_efficient_attention' , SCREAMING_SNAKE_CASE_ )
__snake_case = hidden_dropout_prob
__snake_case = attention_dropout_prob
__snake_case = use_stable_embedding
__snake_case = shared_input_output_embedding
__snake_case = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def a ( self : List[Any] ) -> List[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
__snake_case = self.rope_scaling.get('type' , SCREAMING_SNAKE_CASE_ )
__snake_case = self.rope_scaling.get('factor' , SCREAMING_SNAKE_CASE_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 56 |
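For reference, dicts that the _rope_scaling_validation above would accept or reject (illustrative values only; the dict keys it reads are 'type' and 'factor'):

accepted = {"type": "linear", "factor": 2.0}
rejected_kind = {"type": "static", "factor": 2.0}     # type must be 'linear' or 'dynamic'
rejected_factor = {"type": "dynamic", "factor": 1.0}  # factor must be a float > 1.0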
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
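The trial-division factorization above divides out each candidate i while i * i <= n; whatever remains above 1 is itself prime. A clean restatement with a quick check (names are mine):

def prime_factors(n: int) -> list:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # the leftover cofactor is prime
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]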
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest (squared Euclidean distance) to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The winner is the *closer* vector, i.e. the one with the smaller distance.
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Move the winning vector j a fraction alpha of the way toward the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples (m samples, n features)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization (n features, C clusters)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Cluster that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 57 |
"""Google BLEU (GLEU) metric, built on nltk's corpus_gleu."""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
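# A minimal sentence-level sketch of the same idea, calling nltk directly (assumes nltk
# is installed; `sentence_gleu` takes a list of tokenized references and one hypothesis):
#
#     from nltk.translate.gleu_score import sentence_gleu
#     score = sentence_gleu([["the", "cat", "sat", "down"]], ["the", "cat", "sat"])
#     print(round(score, 2))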
| 71 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
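# `_import_structure` maps each submodule to the public names it exports; the
# `_LazyModule` constructed at the bottom of the file uses it to defer the heavy
# torch imports until one of these attributes is actually accessed.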
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 |
"""Fine-tune seq2seq Transformers models (summarization / translation) with Seq2SeqTrainer."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split: str, metrics: dict, output_dir: str) -> None:
    """Log metrics for a split and save them to `{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Parse arguments, then run training / evaluation / prediction as requested."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
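# Example invocation (a sketch; the flag names come from the dataclasses above, which
# HfArgumentParser turns into CLI arguments):
#
#     python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#         --output_dir ./out --do_train --do_eval --task summarization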
| 71 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
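# Minimal usage sketch (assumes the `vision` and `torch` extras are installed and that a
# local image file exists; the tool call runs encode -> forward -> decode in sequence):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))
#     print(caption)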
| 59 |
"""Tests for the TF Blenderbot model."""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
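    # The check above exercises the decoder KV cache: generating from `past_key_values`
    # must produce the same logits (up to numerical tolerance) as re-running the full
    # concatenated sequence without a cache.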
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's shortest-edge logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
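# Worked example of the shortest-edge rule above: a 300x600 (w x h) image with
# shortest_edge=18 has w < h, so it resizes to width 18 and height int(18 * 600 / 300) = 36.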
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 60 |
"""Gaussian (normal) probability density function."""
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value at x of the normal pdf with mean mu and standard deviation sigma."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
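# Worked value: at the peak, gaussian(0) = 1 / sqrt(2 * pi) ≈ 0.3989 for the standard normal.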
| 71 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Wraps an image processor and a character tokenizer into a single MGP-STR processor."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Fuse char / bpe / wp head predictions, keeping the highest-confidence decoding per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
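# Minimal usage sketch (illustrative; assumes an MGP-STR recognition model whose output
# `logits` is the (char, bpe, wp) tuple that `batch_decode` expects):
#
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     generated = processor.batch_decode(outputs.logits)["generated_text"]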
| 61 |
"""Dual Transformer2D block: two conditioned transformers whose outputs are mixed."""
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
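# The mixing step above is a simple convex combination: with mix_ratio = 0.5, the two
# conditioned residuals contribute equally before being added back to the input states.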
| 71 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _A ( self : Optional[Any] ):
pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds" )
def _A ( self : Tuple ):
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _A ( self : Any ):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _A ( self : Optional[Any] ):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _A ( self : List[str] ):
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _A ( self : List[Any] ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _A ( self : int ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _A ( self : Optional[int] ):
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN is the only value that is not equal to itself
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 62 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
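# Hedged example (added): given results = {"bench/a.json": {"t": {"new": 1.0, "old": 2.0, "diff": -1.0}}},
# the generated markdown gets a "### Benchmark: a.json" section whose value row reads
# "| new / old (diff) |  1.000000 /  2.000000 ( -1.000000) |".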
| 71 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
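# Hedged usage note (added): because sys.modules[__name__] is swapped for the
# _LazyModule, `from transformers.models.rembert import RemBertModel` resolves the
# heavy torch import only on first attribute access.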
| 63 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
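    # Hedged note (added): the ("CUDAExecutionProvider", {...}) tuple is ONNX Runtime's
    # per-provider options form; ort.get_available_providers() lists what is installed.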
| 64 |
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of `number`.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
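# Hedged note (added): for non-negative ints this matches Python's built-in
# int.bit_length(), e.g. get_highest_set_bit_position(25) == (25).bit_length() == 5.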
| 71 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'camembert-base': 512,
}
__UpperCAmelCase = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # A CamemBERT sequence pair has the form: <s> A </s></s> B </s>
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 65 |
from math import factorial


def combinations(n: int, k: int) -> int:
    """
    Returns the number of different length-k combinations that can be drawn
    from n values ("n choose k"), where n >= k.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
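# Hedged cross-check (added): on Python 3.8+ math.comb computes the same value,
# e.g. combinations(52, 5) == math.comb(52, 5) == 2_598_960.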
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 66 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 71 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    """
    Return A(divisor): the number of ones in the least repunit R(k) = 111...1
    divisible by `divisor`, or 0 when divisor is not coprime to 10.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
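# Hedged sanity check (added): 111111 = 3 * 7 * 11 * 13 * 37, so the least repunit
# divisible by 7 has six ones, i.e. A(7) == 6.
assert least_divisible_repunit(7) == 6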
def solution(limit: int = 1_000_000) -> int:
    """
    Return the least divisor n (coprime to 10) for which A(n), the length of the
    least repunit divisible by n, first exceeds `limit` (Project Euler 129).
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 67 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 71 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
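# Hedged example (added): arg_to_scheduler["linear"](optimizer, num_warmup_steps=0,
# num_training_steps=1000) builds a linearly decaying schedule; the trainer below
# selects the entry named by the --lr_scheduler argument.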
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
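    # Hedged sketch (added; mirrors a fairseq-style label_smoothed_nll_loss, the
    # helper dynamically imported above -- names and signature are assumptions):
    #
    #     def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    #         nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1).clamp(min=0))
    #         smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    #         pad_mask = target.unsqueeze(-1).eq(ignore_index)
    #         nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    #         smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    #         eps_i = epsilon / lprobs.size(-1)
    #         return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss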
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.
        """
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler( self , num_training_steps ) -> Any:
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def _compute_loss( self , model , inputs , labels ) -> Tuple:
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ) -> torch.Tensor:
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ) -> torch.Tensor:
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 68 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
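# A minimal usage sketch for the processor class exercised above, assuming the
# public checkpoint "openai/clip-vit-base-patch32" and a PIL image bound to
# `image`; illustrative only, not part of the test suite.
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image,
#                      return_tensors="pt", padding=True)
#   # -> dict with "input_ids", "attention_mask" and "pixel_values"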
| 71 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
    def test_make_duplicate_clusters( self ):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        """simple docstring"""
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
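# A rough sketch of the MinHash-based similarity that drives the clustering
# tested above, using the `datasketch` library; the real
# `make_duplicate_clusters` may tokenize and threshold differently (0.85 above
# is the Jaccard threshold).
#
#   from datasketch import MinHash
#   m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
#   for tok in ("a " * 20).split():
#       m1.update(tok.encode("utf-8"))
#   for tok in ("a " * 30).split():
#       m2.update(tok.encode("utf-8"))
#   m1.jaccard(m2)  # estimated Jaccard similarity in [0, 1]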
| 69 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (PipelineTesterMixin , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
        UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
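# A minimal end-to-end usage sketch for the pipeline exercised above; assumes a
# machine that can download "cvssp/audioldm" and is illustrative only.
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10,
#                audio_length_in_s=5.0).audios[0]  # 1-D waveform at 16 kHz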
| 71 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                # third argument assumed to toggle the sigmoid on the scores
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
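# A conceptual sketch of the mask that magnitude pruning computes per weight
# matrix, assuming `keep_fraction` is the fraction of weights to keep (as the
# --threshold help text below describes); `MagnitudeBinarizer` itself may
# differ internally.
def _magnitude_mask_sketch(tensor, keep_fraction):
    cutoff = torch.quantile(tensor.abs().float().flatten(), 1.0 - keep_fraction)
    return (tensor.abs() >= cutoff).to(tensor.dtype)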
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
lowerCamelCase : Tuple = parser.parse_args()
main(args)
| 70 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
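# A bare-bones illustration of the lazy-import pattern that `_LazyModule`
# implements above: attribute access triggers the real import on first use.
# A simplified sketch only; the actual `_LazyModule` handles far more cases.
import importlib

class _LazyModuleSketch:
    def __init__(self, package, import_structure):
        self._package = package
        self._structure = import_structure  # {submodule_name: [symbol, ...]}

    def __getattr__(self, attr):
        for submodule, symbols in self._structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self._package)
                return getattr(module, attr)
        raise AttributeError(attr)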
| 71 | 0 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_UpperCAmelCase : Dict = logging.get_logger(__name__)
enable_full_determinism()
class __magic_name__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = 'sample'
@property
def _A( self ):
lowercase =4
lowercase =3
lowercase =(32, 32)
lowercase =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
lowercase =torch.tensor([10] ).to(snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _A( self ):
return (3, 32, 32)
@property
def _A( self ):
return (3, 32, 32)
def _A( self ):
lowercase ={
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowercase =self.dummy_input
return init_dict, inputs_dict
class __magic_name__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = 'sample'
@property
def _A( self ):
lowercase =4
lowercase =4
lowercase =(32, 32)
lowercase =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
lowercase =torch.tensor([10] ).to(snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _A( self ):
return (4, 32, 32)
@property
def _A( self ):
return (4, 32, 32)
def _A( self ):
lowercase ={
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowercase =self.dummy_input
return init_dict, inputs_dict
def _A( self ):
lowercase , lowercase =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case_ )
lowercase =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _A( self ):
lowercase , lowercase =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
model.to(snake_case_ )
lowercase =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _A( self ):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
lowercase , lowercase =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
model_accelerate.to(snake_case_ )
model_accelerate.eval()
lowercase =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase =noise.to(snake_case_ )
lowercase =torch.tensor([10] * noise.shape[0] ).to(snake_case_ )
lowercase =model_accelerate(snake_case_ , snake_case_ )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowercase , lowercase =UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ , low_cpu_mem_usage=snake_case_ )
model_normal_load.to(snake_case_ )
model_normal_load.eval()
lowercase =model_normal_load(snake_case_ , snake_case_ )['''sample''']
assert torch_all_close(snake_case_ , snake_case_ , rtol=1E-3 )
def _A( self ):
lowercase =UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(snake_case_ )
lowercase =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase =noise.to(snake_case_ )
lowercase =torch.tensor([10] * noise.shape[0] ).to(snake_case_ )
with torch.no_grad():
lowercase =model(snake_case_ , snake_case_ ).sample
lowercase =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase =torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1E-3 ) )
class __magic_name__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = 'sample'
@property
def _A( self , snake_case_=(32, 32) ):
lowercase =4
lowercase =3
lowercase =floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
lowercase =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _A( self ):
return (3, 32, 32)
@property
def _A( self ):
return (3, 32, 32)
def _A( self ):
lowercase ={
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowercase =self.dummy_input
return init_dict, inputs_dict
@slow
def _A( self ):
lowercase , lowercase =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case_ )
lowercase =self.dummy_input
lowercase =floats_tensor((4, 3) + (2_56, 2_56) ).to(snake_case_ )
lowercase =noise
lowercase =model(**snake_case_ )
assert image is not None, "Make sure output is not None"
@slow
def _A( self ):
lowercase =UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(snake_case_ )
lowercase =4
lowercase =3
lowercase =(2_56, 2_56)
lowercase =torch.ones((batch_size, num_channels) + sizes ).to(snake_case_ )
lowercase =torch.tensor(batch_size * [1E-4] ).to(snake_case_ )
with torch.no_grad():
lowercase =model(snake_case_ , snake_case_ ).sample
lowercase =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase =torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1E-2 ) )
def _A( self ):
lowercase =UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(snake_case_ )
lowercase =4
lowercase =3
lowercase =(32, 32)
lowercase =torch.ones((batch_size, num_channels) + sizes ).to(snake_case_ )
lowercase =torch.tensor(batch_size * [1E-4] ).to(snake_case_ )
with torch.no_grad():
lowercase =model(snake_case_ , snake_case_ ).sample
lowercase =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase =torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1E-2 ) )
def _A( self ):
# not required for this model
pass
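# A minimal forward-pass sketch for the model family tested above (upstream the
# class is `UNet2DModel`); shapes mirror the dummy inputs used in the tests and
# the randomly initialized config is an assumption.
#
#   model = UNetaDModel(sample_size=32, in_channels=3, out_channels=3,
#                       layers_per_block=2, block_out_channels=(32, 64),
#                       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#                       up_block_types=("AttnUpBlock2D", "UpBlock2D"))
#   sample = torch.randn(1, 3, 32, 32)
#   out = model(sample, timestep=10).sample  # same shape as `sample`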
| 72 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set[int]:
    """simple docstring"""
    queue: list[list] = []
    # for each node and its adjacency list, add [rank, (node, adjacency)] to the queue
    # using the heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1 * len(v) turns it into a max queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value )] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
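    # A second worked example, assuming the same adjacency-list convention:
    # a star graph is covered by its center alone.
    star_graph = {0: [1, 2, 3], 1: [0], 2: [0], 3: [0]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(star_graph)}""")  # {0}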
| 71 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')
    def construct_mapping( self , node , deep=False ) -> dict:
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme(readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path: Path ) -> "DatasetMetadata":
        with open(path , encoding='utf-8') as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path: Path ) -> None:
        if path.exists():
            with open(path , encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8') as readme_file:
            readme_file.write(full_content )
    def _to_readme( self , readme_content: Optional[str] = None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string( cls , string: str ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_') if key.replace('-' , '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_' , '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8')
a_ : Optional[int] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a_ : int = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
a_ : Optional[Any] = ap.parse_args()
a_ : Union[str, Any] = Path(args.readme_filepath)
a_ : Optional[Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
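# A small usage sketch, assuming a YAML block that carries a `pretty_name`
# field; illustrative only.
#
#   metadata = DatasetMetadata.from_yaml_string("pretty_name: My Dataset\n")
#   metadata  # -> {'pretty_name': 'My Dataset'}
#   metadata.to_yaml_string()  # round-trips back to "pretty_name: My Dataset\n"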
| 73 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (BaseImageProcessor):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
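# A minimal preprocessing sketch for the video processor defined above
# (upstream this class is `VivitImageProcessor`), assuming it is called through
# the standard `BaseImageProcessor.__call__` interface with one 8-frame video.
#
#   import numpy as np
#   processor = _snake_case()
#   video = [np.random.randint(0, 256, (3, 300, 400), dtype=np.uint8) for _ in range(8)]
#   out = processor(video, return_tensors="np")
#   out["pixel_values"].shape  # e.g. (1, 8, 3, 224, 224)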
| 71 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ''''''
lowerCAmelCase_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCAmelCase_ = None # compression type in fsspec. ex: "gzip"
lowerCAmelCase_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo: str = "" , target_protocol: Optional[str] = None , target_options: Optional[dict] = None , **kwargs ):
        """simple docstring"""
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='''rb''' , protocol=target_protocol , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('''::''' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
def UpperCAmelCase__ ( cls : List[str] , _A : Optional[int] ):
"""simple docstring"""
return super()._strip_protocol(_A ).lstrip('''/''' )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
def UpperCAmelCase__ ( self : Dict , _A : str ):
"""simple docstring"""
return self.file.open().read()
def UpperCAmelCase__ ( self : Any , _A : str , _A : str = "rb" , _A : Tuple=None , _A : Union[str, Any]=True , _A : Dict=None , **_A : List[Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self._strip_protocol(_A )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''bz2'''
lowerCAmelCase_ = '''bz2'''
lowerCAmelCase_ = '''.bz2'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''gzip'''
lowerCAmelCase_ = '''gzip'''
lowerCAmelCase_ = '''.gz'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''lz4'''
lowerCAmelCase_ = '''lz4'''
lowerCAmelCase_ = '''.lz4'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''xz'''
lowerCAmelCase_ = '''xz'''
lowerCAmelCase_ = '''.xz'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''zstd'''
lowerCAmelCase_ = '''zstd'''
lowerCAmelCase_ = '''.zst'''
def __init__( self , fo , mode = "rb" , target_protocol = None , target_options = None , block_size = DEFAULT_BLOCK_SIZE , **kwargs , ):
"""simple docstring"""
super().__init__(
fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_enter = self.file.__enter__
class WrappedFile :
"""simple docstring"""
def __init__( self , file_ ):
"""simple docstring"""
self._file = file_
def __enter__( self : int ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self , *args , **kwargs ):
"""simple docstring"""
self._file.__exit__(*args , **kwargs )
def __iter__( self : List[Any] ):
"""simple docstring"""
return iter(self._file )
def __next__( self ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : Dict , _A : Optional[int] ):
"""simple docstring"""
return getattr(self._file , _A )
def fixed_enter(*args , **kwargs ):
return WrappedFile(_enter(*args , **kwargs ) )
self.file.__enter__ = fixed_enter
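# --- Added usage sketch (not part of the original record; the path and contents are
# illustrative). The subclasses above expose a single compressed file read-only; plain
# fsspec offers the same behaviour through compression inference:
#
# import gzip
# import fsspec
# with gzip.open("/tmp/example.txt.gz", "wb") as fh:
#     fh.write(b"hello compressed world")
# with fsspec.open("/tmp/example.txt.gz", mode="rb", compression="infer") as fh:
#     assert fh.read() == b"hello compressed world"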
| 74 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (AbstractDatasetReader):
def __init__( self ,path_or_paths ,split = None ,features = None ,cache_dir = None ,keep_in_memory = False ,streaming = False ,field = None ,num_proc = None ,**kwargs ,):
super().__init__(
path_or_paths ,split=split ,features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,streaming=streaming ,num_proc=num_proc ,**kwargs ,)
self.field = field
path_or_paths = path_or_paths if isinstance(path_or_paths ,dict ) else {self.split: path_or_paths}
self.builder = Json(
cache_dir=cache_dir ,data_files=path_or_paths ,features=features ,field=field ,**kwargs ,)
def read( self ):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,num_proc=self.num_proc ,)
dataset = self.builder.as_dataset(
split=self.split ,verification_mode=verification_mode ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,dataset ,path_or_buf ,batch_size = None ,num_proc = None ,**to_json_kwargs ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.to_json_kwargs = to_json_kwargs
def write( self ):
path_or_buf = self.to_json_kwargs.pop("path_or_buf" ,None )
orient = self.to_json_kwargs.pop("orient" ,"records" )
lines = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
index = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
compression = self.to_json_kwargs.pop("compression" ,None )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=compression ) as buffer:
written = self._write(file_obj=buffer ,orient=orient ,lines=lines ,index=index ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
written = self._write(
file_obj=self.path_or_buf ,orient=orient ,lines=lines ,index=index ,**self.to_json_kwargs )
return written
def _batch_json( self ,args ):
offset , orient , lines , index , to_json_kwargs = args
batch = query_table(
table=self.dataset.data ,key=slice(offset ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
json_str = batch.to_pandas().to_json(
path_or_buf=None ,orient=orient ,lines=lines ,index=index ,**to_json_kwargs )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _write( self ,file_obj ,orient ,lines ,index ,**to_json_kwargs ,):
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(json_bytes )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_bytes in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,num_rows ,batch_size )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(json_bytes )
return written
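# --- Added usage sketch (assumption: mirrors the public `datasets` API that wraps the
# reader/writer above; the output path is a placeholder):
#
# from datasets import Dataset
# ds = Dataset.from_dict({"a": [1, 2, 3]})
# ds.to_json("/tmp/out.jsonl", lines=True)  # batched JSON Lines export via the writer above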
| 71 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester( ConfigTester ):
def create_and_test_config_common_properties( self ):
'''simple docstring'''
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(config , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class MobileViTModelTester :
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , last_hidden_size=640 , num_attention_heads=4 , hidden_act="silu" , conv_kernel_size=3 , output_stride=32 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.last_hidden_size = last_hidden_size
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.use_labels = use_labels
self.is_training = is_training
self.num_labels = num_labels
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs( self ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
model = MobileViTModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileViTForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
'''simple docstring'''
config.num_labels = self.num_labels
model = MobileViTForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels , pixel_labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def setUp( self ):
'''simple docstring'''
self.model_tester = MobileViTModelTester(self )
self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def test_attention_outputs( self ):
'''simple docstring'''
pass
def test_forward_signature( self ):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_hidden_states_output( self ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_stages = 5
self.assertEqual(len(hidden_states ) , expected_num_stages )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
divisor = 2
for i in range(len(hidden_states ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def test_for_image_classification( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_for_semantic_segmentation( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = MobileViTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest( unittest.TestCase ):
@cached_property
def default_image_processor( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def test_inference_image_classification_head( self ):
'''simple docstring'''
model = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_inference_semantic_segmentation( self ):
'''simple docstring'''
model = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
model = model.to(torch_device )
image_processor = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=torch_device , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_post_processing_semantic_segmentation( self ):
'''simple docstring'''
model = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
model = model.to(torch_device )
image_processor = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
expected_shape = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , expected_shape )
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
expected_shape = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , expected_shape )
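# --- Added inference sketch (assumption: requires Hub access; the checkpoint name is the
# one used by the integration tests above):
#
# from transformers import MobileViTImageProcessor, MobileViTForImageClassification
# processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
# model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# logits = model(**inputs).logits  # shape (1, 1000)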
| 75 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=DummyObject):
_backends = ["speech"]
def __init__( self ,*args ,**kwargs ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=DummyObject):
_backends = ["speech"]
def __init__( self ,*args ,**kwargs ):
requires_backends(self ,["speech"] )
| 71 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('''.''' ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split('''.''' )[-2]
mapped_key = mapped_key.replace('''*''' , layer_index )
if "weight_g" in name:
weight_type = '''weight_g'''
elif "weight_v" in name:
weight_type = '''weight_v'''
elif "weight" in name:
weight_type = '''weight'''
elif "bias" in name:
weight_type = '''bias'''
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
name = full_name.split('''conv_layers.''' )[-1]
items = name.split('''.''' )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
if config_path is not None:
config = HubertConfig.from_pretrained(config_path )
else:
config = HubertConfig()
if is_finetuned:
if dict_path:
target_dict = Dictionary.load(dict_path )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols )
vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
if not os.path.isdir(pytorch_dump_folder_path ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
return
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , vocab_handle )
tokenizer = WavaVecaCTCTokenizer(
vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
processor.save_pretrained(pytorch_dump_folder_path )
hf_wavavec = HubertForCTC(config )
else:
hf_wavavec = HubertModel(config )
if is_finetuned:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
model = model[0].eval()
recursively_load_weights(model , hf_wavavec , is_finetuned )
hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
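# --- Added example invocation (assumption: the script file name is a placeholder and the
# checkpoint path must point at a real fairseq artifact):
#
# python convert_hubert_checkpoint.py \
#     --checkpoint_path hubert_base_ls960.pt \
#     --pytorch_dump_folder_path ./hubert-base \
#     --not_finetuned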
| 76 |
'''simple docstring'''
def a__ ( equation1: list[int] , equation2: list[int] ) -> tuple[float, float]:
"""
Solve the 2x2 linear system a1*x + b1*y = c1 , a2*x + b2*y = c2 with Cramer's rule,
where each equation is passed as its coefficient list [a, b, c].
"""
if not len(equation1 ) == len(equation2 ) == 3:
raise ValueError("Please enter a valid equation." )
if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
a1 , b1 , c1 = equation1
a2 , b2 , c2 = equation2
# Calculate the determinants of the matrices
determinant = a1 * b2 - a2 * b1
determinant_x = c1 * b2 - c2 * b1
determinant_y = a1 * c2 - a2 * c1
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
x = determinant_x / determinant
y = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
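# Added worked example (not part of the original record): for x + 2y = 3 and
# 2x + y = 3 the determinant is 1*1 - 2*2 = -3, so x = y = -3 / -3 = 1.
if __name__ == "__main__":
print(a__([1, 2, 3] , [2, 1, 3] )) # -> (1.0, 1.0)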
| 71 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_mobilebert"""] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_mobilebert"""] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
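# --- Added note (sketch): the _LazyModule registered above defers the heavy torch/tf
# imports until first attribute access, e.g.
#
# from transformers import MobileBertModel  # the backing module is only imported here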
| 77 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data: list , ndigits: int = 3 ) -> list:
"""
Rescale the data to the range [0, 1] (min-max normalization).
"""
x_min = min(data )
x_max = max(data )
# normalize data
return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data: list , ndigits: int = 3 ) -> list:
"""
Rescale the data to zero mean and unit standard deviation (z-score).
"""
mu = mean(data )
sigma = stdev(data )
# standardize data
return [round((x - mu) / sigma , ndigits ) for x in data]
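# Added worked example (not part of the original record): for the sample [2, 4, 6]
# the mean is 4 and the sample standard deviation is 2.
if __name__ == "__main__":
print(normalization([2, 4, 6] )) # -> [0.0, 0.5, 1.0]
print(standardization([2, 4, 6] )) # -> [-1.0, 0.0, 1.0]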
| 71 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
model_type = """bridgetower_vision_model"""
def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.stop_gradient = stop_gradient
self.share_layernorm = share_layernorm
self.remove_last_layer = remove_last_layer
@classmethod
def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
if config_dict.get("model_type" ) == "bridgetower":
config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig( PretrainedConfig ):
model_type = """bridgetower_text_model"""
def __init__(self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_factor = initializer_factor
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
@classmethod
def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
if config_dict.get("model_type" ) == "bridgetower":
config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig( PretrainedConfig ):
model_type = """bridgetower"""
def __init__(self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
# TODO: remove this once the Hub files are updated.
_ = kwargs.pop("text_config_dict" , None )
_ = kwargs.pop("vision_config_dict" , None )
super().__init__(**kwargs )
self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
self.hidden_act = hidden_act
self.hidden_size = hidden_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.share_link_tower_layers = share_link_tower_layers
self.link_tower_type = link_tower_type
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.tie_word_embeddings = tie_word_embeddings
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
if text_config is None:
text_config = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
vision_config = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
self.text_config = BridgeTowerTextConfig(**text_config )
self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
def from_text_vision_configs(cls , text_config: BridgeTowerTextConfig , vision_config: BridgeTowerVisionConfig , **kwargs ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def to_dict(self ):
output = copy.deepcopy(self.__dict__ )
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
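# --- Added usage sketch (assumption: illustrative only; mirrors the classmethods above):
#
# text_cfg = BridgeTowerTextConfig()
# vision_cfg = BridgeTowerVisionConfig()
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# cfg.to_dict()["text_config"]["hidden_size"]  # -> 768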
| 78 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
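# --- Added minimal sketch (not part of the original script) of the # New Code # pattern
# used below: `find_executable_batch_size` re-runs the wrapped function with a halved
# batch size whenever it raises an out-of-memory error, until training fits.
#
# from accelerate.utils import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run the loop
#
# train()  # called with no arguments; the decorator supplies batch_size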
_lowerCamelCase = 16
_lowerCamelCase = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
datasets = load_dataset("glue" , "mrpc" )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"] )
seed = int(config["seed"] )
batch_size = int(config["batch_size"] )
metric = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=batch_size )
def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(seed )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr )
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
outputs = model(**batch )
loss = outputs.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main( ):
"""simple docstring"""
parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
args = parser.parse_args()
config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config , args )
if __name__ == "__main__":
main()
| 71 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def rename_keys( name ):
'''simple docstring'''
if "emb" in name:
name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
name = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
name = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
name = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
name = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
name = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
name = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
name = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def rename_state_dict( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
'''simple docstring'''
keys = list(state_dict.keys() )
enc_dec_proj_state_dict = {}
for key in keys:
val = state_dict.pop(key )
key = rename_keys(key )
if "in_proj_weight" in key:
# split fused qkv proj
state_dict[key.replace("""in_proj_weight""" , """q_proj.weight""" )] = val[:hidden_size, :]
state_dict[key.replace("""in_proj_weight""" , """k_proj.weight""" )] = val[hidden_size : 2 * hidden_size, :]
state_dict[key.replace("""in_proj_weight""" , """v_proj.weight""" )] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
enc_dec_proj_state_dict[key[len("""enc_to_dec_proj.""" ):]] = val
else:
state_dict[key] = val
return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
hidden_size = 1024
num_hidden_layers = 24
num_attention_heads = 16
elif checkpoint == "medium":
hidden_size = 1536
num_hidden_layers = 48
num_attention_heads = 24
elif checkpoint == "large":
hidden_size = 2048
num_hidden_layers = 48
num_attention_heads = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
config = MusicgenDecoderConfig(
hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
'''simple docstring'''
fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
decoder_config = decoder_config_from_checkpoint(checkpoint )
decoder_state_dict = fairseq_model.lm.state_dict()
decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
decoder_state_dict , hidden_size=decoder_config.hidden_size )
text_encoder = TaEncoderModel.from_pretrained("""t5-base""" )
audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
decoder = MusicgenForCausalLM(decoder_config ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(key )
if len(missing_keys ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(unexpected_keys ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
# check we can do a forward pass
input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
tokenizer = AutoTokenizer.from_pretrained("""t5-base""" )
feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
# set the appropriate bos/pad token ids
model.generation_config.decoder_start_token_id = 2048
model.generation_config.pad_token_id = 2048
# set other default generation config params
model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
model.generation_config.do_sample = True
model.generation_config.guidance_scale = 3.0
if pytorch_dump_folder is not None:
Path(pytorch_dump_folder ).mkdir(exist_ok=True )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(pytorch_dump_folder )
processor.save_pretrained(pytorch_dump_folder )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(repo_id )
processor.push_to_hub(repo_id )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
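# --- Added example invocation (assumption: the script file name and output folder are
# placeholders):
#
# python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small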
| 79 |
'''simple docstring'''
from __future__ import annotations
def a__ ( n: int ) -> list[int]:
"""
Return the prime factors of ``n`` in ascending order, by trial division.
>>> a__(360)
[2, 2, 2, 3, 3, 5]
>>> a__(97)
[97]
"""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i )
if n > 1:
factors.append(n )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest( unittest.TestCase ):
def setUp( self ):
"""simple docstring"""
self.block_size = 10
def test_fit_to_block_sequence_too_small( self ):
"""simple docstring"""
sequence = [1, 2, 3, 4]
expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def test_fit_to_block_sequence_fit_exactly( self ):
"""simple docstring"""
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def test_fit_to_block_sequence_too_big( self ):
"""simple docstring"""
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )
def test_process_story_no_highlights( self ):
"""simple docstring"""
raw_story = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
_ , summary_lines = process_story(raw_story )
self.assertEqual(summary_lines , [] )
def test_process_empty_story( self ):
"""simple docstring"""
raw_story = """"""
story_lines , summary_lines = process_story(raw_story )
self.assertEqual(story_lines , [] )
self.assertEqual(summary_lines , [] )
def test_process_story( self ):
"""simple docstring"""
raw_story = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
story_lines , summary_lines = process_story(raw_story )
expected_story_lines = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(expected_story_lines , story_lines )
expected_summary_lines = ["""It was the best of times."""]
self.assertEqual(expected_summary_lines , summary_lines )
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = torch.tensor([1, 2, 3, 4] )
__lowercase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def _a ( self : str ) -> Any:
"""simple docstring"""
__lowercase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = 101
__lowercase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase = compute_token_type_ids(_lowerCAmelCase , _lowerCAmelCase )
np.testing.assert_array_equal(_lowerCAmelCase , _lowerCAmelCase )
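

def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    # Hedged reference sketch (assumption): `truncate_or_pad` itself lives in
    # utils_summarization and is not shown here. A minimal implementation that
    # satisfies the three block-size tests above is:
    return list(sequence[:block_size]) + [pad_token_id] * max(0, block_size - len(sequence))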
| 80 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
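

if __name__ == "__main__":
    # Hedged sanity check (added for illustration, not in the original metric file):
    # a hypothesis identical to its single reference reaches the maximum GLEU of 1.0.
    print(gleu_score.corpus_gleu(list_of_references=[[["the", "cat", "sat"]]], hypotheses=[["the", "cat", "sat"]]))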
| 71 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND on 0/1 integers: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
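    # Expected output of the four prints above: 0, 0, 0, 1 (the AND truth table).
    # Hedged extension sketch: other gates compose in the same 0/1-int style,
    # e.g. NAND is `1 - and_gate(a, b)`.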
| 81 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for a split and save them to `{output_dir}/{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs); the spawned `index` argument is unused.
    main()
if __name__ == "__main__":
main()
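# Hedged invocation sketch (added for illustration; directory and model names are assumptions):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./wmt_en_ro \
#       --output_dir ./output --do_train --do_eval --task translation --predict_with_generate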
| 71 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
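

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original file):
    # the `attribute_map` above lets generic code read `hidden_size` even though
    # the underlying field is stored as `d_model`.
    cfg = DetrConfig()
    print(cfg.d_model, cfg.hidden_size)  # both print 256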
| 82 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build default masks for any inputs that were not provided."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
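

# Hedged note (added for illustration): the helper above follows the BART-family
# convention for default masks: encoder pad positions are zeroed out, while the
# decoder mask is forced to attend to its first (decoder-start) token, e.g.
#   input_ids [[5, 6, <pad>]] -> attention_mask [[1, 1, 0]]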
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to `max_length`."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None,):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="",):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to `folder_path/git_log.json`."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lowercase text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and ground truth."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
return hparams, config
| 83 |
'''simple docstring'''
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma^2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
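    # Hedged worked check (added for illustration): the standard normal density
    # at x = 0 is 1 / sqrt(2 * pi), roughly 0.39894.
    print(gaussian(0.0))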
| 71 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 84 |
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self, num_attention_heads=16, attention_head_dim=88, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 71 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
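
# Hedged usage sketch (assumption: the companion test file is not shown here).
# test_patching.py presumably swaps attributes of this module along these lines:
#     with patch_submodule(_test_patching, "os.path.join", mock_join):
#         assert _test_patching.path.join is mock_join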
| 85 |
'''simple docstring'''
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown report."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
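# Illustrative output shape (values are made up):
#   <details>
#   <summary>Show updated benchmarks!</summary>
#   ### Benchmark: my_benchmark.json
#   | metric | load_time |
#   |--------|---|
#   | new / old (diff) |  1.200000 / 1.000000 (0.200000) |
#   </details>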
| 71 | 0 |
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: smallest squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Smallest squared distance inside the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer over the x-sorted points; returns a squared distance."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
__a :List[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 86 |
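Sanity-check sketch for the divide-and-conquer solver, comparing it against an O(n^2) brute force on the sample points (assumes the functions above are in scope; Python 3.8+ for math.dist):

import math

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
brute = min(math.dist(p, q) for i, p in enumerate(pts) for q in pts[i + 1:])
fast = closest_pair_of_points(pts, len(pts))
assert math.isclose(brute, fast)  # both ~1.4142 for this input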
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =DebertaVaTokenizer
__A : Union[str, Any] =DebertaVaTokenizerFast
__A : str =True
__A : List[str] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_lowerCamelCase : Any = """▁"""
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = AlbertTokenizer
def __init__( self : int , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[Any]="[CLS]" , UpperCAmelCase__ : str="[SEP]" , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Union[str, Any]="[SEP]" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : List[Any]="[CLS]" , UpperCAmelCase__ : str="[MASK]" , **UpperCAmelCase__ : int , ) ->str:
'''simple docstring'''
A__ = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__)
else mask_token
)
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(UpperCAmelCase__):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__):
copyfile(self.vocab_file , UpperCAmelCase__)
return (out_vocab_file,)
| 87 |
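Illustration of the special-token layout the two helpers above produce, using plain-Python stand-ins so no checkpoint is needed (the token ids below are hypothetical):

cls, sep = [2], [3]                     # hypothetical [CLS] / [SEP] ids
ids_a, ids_b = [10, 11], [20]
single = cls + ids_a + sep              # [CLS] A [SEP]
pair = cls + ids_a + sep + ids_b + sep  # [CLS] A [SEP] B [SEP]
type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
assert len(pair) == len(type_ids)       # one segment id per token in the pair encoding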
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the most significant set bit (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
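A few worked cases for the helper above; for non-negative integers its result agrees with int.bit_length():

for value in (0, 1, 2, 8, 255):
    assert get_highest_set_bit_position(value) == value.bit_length()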
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 88 |
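Minimal sketch of instantiating the config above and reading a few fields back (assumes the module is importable as-is; no network access is needed to build a config object):

config = RealmConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64, intermediate_size=128)
print(config.model_type, config.hidden_size, config.reader_beam_size)  # realm 64 5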
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Number of ways to choose k items from n, order ignored."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 0 |
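Spot checks for the closed form n! / (k! * (n - k)!) used above, matching the three printed scenarios:

assert combinations(52, 5) == 2_598_960  # five-card hands
assert combinations(40, 4) == 91_390     # groups of 4 from 40 students
assert combinations(10, 3) == 120        # choosing 3 of 10 teams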
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 89 |
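The module above follows the transformers lazy-import pattern: public names are declared in _import_structure and only resolved on first attribute access. A toy sketch of the idea (not the real _LazyModule, which also handles module specs and missing backends):

import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        submodule = self._name_to_module[name]  # the real class raises AttributeError on misses
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)            # import deferred until first use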
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a__ ( a__ ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_ , '''width_multiplier''' ) )
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=64 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_="swish" , lowerCamelCase_=3 , lowerCamelCase_=32 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=10 , lowerCamelCase_=None , lowerCamelCase_=0.25 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , ) -> Tuple:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = make_divisible(5_12 * width_multiplier , divisor=8 )
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = conv_kernel_size
lowerCAmelCase__ = output_stride
lowerCAmelCase__ = classifier_dropout_prob
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = width_multiplier
lowerCAmelCase__ = ffn_dropout
lowerCAmelCase__ = attn_dropout
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = MobileViTVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ : int = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Optional[int] = False
lowercase__ : int = False
lowercase__ : List[str] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = MobileViTVaModelTester(self )
lowerCAmelCase__ = MobileViTVaConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = 5
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase__ = 2
for i in range(len(lowerCamelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = MobileViTVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _snake_case ( ) -> int:
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=lowerCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits.detach().cpu()
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(50, 60)] )
lowerCAmelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCAmelCase__ = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
| 90 |
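The tester above sizes its last hidden dimension with make_divisible. A common definition of that helper, given as a sketch (it may differ in detail from the transformers implementation):

def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never shrink the channel count by more than ~10%
        new_value += divisor
    return new_value


assert make_divisible_sketch(512 * 0.25, divisor=8) == 128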
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCamelCase = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 71 | 0 |
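The same inspection APIs in ordinary use (network access required; the exact output depends on the current state of the Hub):

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")                        # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", config_name=configs[0])  # e.g. ["train", "validation"]
print(configs, splits)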
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 91 |
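The reader above is what backs the public entry point; a minimal sketch of building a dataset from a Python generator:

from datasets import Dataset


def gen():
    for i in range(3):
        yield {"idx": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
print(ds[0])  # {'idx': 0, 'text': 'example 0'}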
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
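# Note on the tests above: each one round-trips the processor through
# save_pretrained / from_pretrained and then checks that the slow (Python) and
# fast (Rust) tokenizers, and the image processor, agree on their vocab and
# serialized config. This save/load parity check is the recurring pattern in
# this processor test suite.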
| 71 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Any =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Optional[int] =PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : int =CLIPTextModel(UpperCAmelCase__ )
lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[str] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=0 ):
'''simple docstring'''
lowercase : Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase : Any =Image.fromarray(np.uint8(UpperCAmelCase__ ) ).convert('''RGB''' )  # np.uinta is not a real numpy dtype; uint8 is the intended cast
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Dict =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : List[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Dict ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
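    # The mps branch in get_dummy_inputs above is a common workaround:
    # torch.Generator(device="mps") is not supported on older torch builds, so
    # tests on Apple Silicon fall back to the global torch.manual_seed instead.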
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Tuple ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : str =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
lowercase : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Union[str, Any] =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Tuple =self.get_dummy_components()
lowercase : Optional[int] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Tuple ='''french fries'''
lowercase : Dict =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : List[str] =output.images
lowercase : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Any =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =[inputs['''prompt''']] * 2
        lowercase : int =np.array(inputs['''image'''] ).astype(np.float32 ) / 2_55.0  # np.floataa does not exist; float32 is the intended dtype
lowercase : Tuple =torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 ).to(UpperCAmelCase__ )
lowercase : List[Any] =image / 2 + 0.5
lowercase : List[str] =image.permute(0 , 3 , 1 , 2 )
lowercase : List[str] =image.repeat(2 , 1 , 1 , 1 )
lowercase : List[Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[int] =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase : Union[str, Any] =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Union[str, Any] =self.get_dummy_components()
lowercase : Any =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Dict =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1]
        lowercase : List[str] =[round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase : Dict =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =VaeImageProcessor(do_resize=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ )
lowercase : str =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' ) )[0]
lowercase : List[str] =components['''vae''']
lowercase : str =self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase : List[Any] =pipe(**UpperCAmelCase__ )[0]
lowercase : Optional[int] =np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str=0 ):
'''simple docstring'''
lowercase : Any =torch.manual_seed(UpperCAmelCase__ )
lowercase : List[Any] =load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowercase : Optional[int] ={
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[Any] =self.get_inputs()
lowercase : Any =pipe(**UpperCAmelCase__ ).images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Any =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
lowercase : int =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Tuple =self.get_inputs()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Any =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
lowercase : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : List[Any] =self.get_inputs()
lowercase : Dict =pipe(**UpperCAmelCase__ ).images
lowercase : List[str] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        number_of_steps = 0  # bound here so the `nonlocal` declaration below has a target
        def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True  # checked by the assertion at the end of this test
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : Optional[int] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Any =latents[0, -3:, -3:, -1]
lowercase : int =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Dict =latents[0, -3:, -3:, -1]
lowercase : Any =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False  # reset before the pipeline run
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.float16 )  # torch.floataa is not a real dtype; the fp16 slow test is assumed here
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
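        # Hedged note on the callback contract exercised here: the pipeline
        # invokes callback(step, timestep, latents) once every `callback_steps`
        # denoising iterations, so num_inference_steps=3 with callback_steps=1
        # yields exactly the three invocations asserted above.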
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.float16 )  # fp16 assumed, consistent with the < 2.2 GB memory assertion below
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : Dict =self.get_inputs()
lowercase : List[Any] =pipe(**UpperCAmelCase__ )
lowercase : List[str] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[str] =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase : Dict =inputs['''image'''].resize((504, 504) )
lowercase : Any ='''timbrooks/instruct-pix2pix'''
lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ )
lowercase : Dict =output.images[0]
lowercase : Tuple =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase : Optional[Any] =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
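# The slow tests above all follow the same regression recipe: run a short,
# fixed-seed generation, take a small corner slice of one channel, and compare
# it element-wise against hard-coded reference values within a loose tolerance.
# Slices keep the checks cheap and less brittle than full-image comparisons.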
| 92 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
        # comparing an array with itself is always zero; the intent is to compare the
        # direct-prompt output with the prompt-embeds output of the two forward passes
        assert np.abs(audio_a - audio_b ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
                p ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
        # as above, the two forward passes (prompt strings vs pre-computed embeddings)
        # are the quantities being compared
        assert np.abs(audio_a - audio_b ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = "arrow" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Dict = load_from_cache_file
lowerCAmelCase__ :int = file_format
lowerCAmelCase__ :int = Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def snake_case ( self ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase__ :Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
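# Hedged usage sketch (the public names below are assumptions about the wrapping
# API, not part of this module): this reader is the machinery behind loading a
# Spark DataFrame as a Hugging Face dataset, roughly
#
# from datasets import Dataset, IterableDataset
# ds = Dataset.from_spark(spark_df)           # cached Arrow path (builder branch above)
# it = IterableDataset.from_spark(spark_df)   # streaming branch above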
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
SCREAMING_SNAKE_CASE = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def A__ ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =len(references[0] )
if any(len(UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowercase : List[Any] =[[refs[i] for refs in references] for i in range(UpperCAmelCase )]
lowercase : int =TER(
normalized=UpperCAmelCase , no_punct=UpperCAmelCase , asian_support=UpperCAmelCase , case_sensitive=UpperCAmelCase , )
lowercase : Union[str, Any] =sb_ter.corpus_score(UpperCAmelCase , UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 94 |
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase_ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
lowerCamelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def snake_case ( A__ ,A__=False ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = create_model(
"HTSAT-tiny" ,"roberta" ,A__ ,precision="fp32" ,device="cuda:0" if torch.cuda.is_available() else "cpu" ,enable_fusion=A__ ,fusion_type="aff_2d" if enable_fusion else None ,)
return model, model_cfg
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : Optional[Any] = r".*sequential.(\d+).*"
UpperCAmelCase_ : int = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_ : Tuple = key.replace(A__ ,A__ )
if re.match(A__ ,A__ ):
# replace sequential layers with list
UpperCAmelCase_ : int = re.match(A__ ,A__ ).group(1 )
UpperCAmelCase_ : List[str] = key.replace(F"""sequential.{sequential_layer}.""" ,F"""layers.{int(A__ )//3}.linear.""" )
elif re.match(A__ ,A__ ):
UpperCAmelCase_ : Any = int(re.match(A__ ,A__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase_ : Optional[Any] = 1 if projecton_layer == 0 else 2
UpperCAmelCase_ : List[str] = key.replace(F"""_projection.{projecton_layer}.""" ,F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase_ : Union[str, Any] = value
UpperCAmelCase_ : Tuple = mixed_qkv.size(0 ) // 3
UpperCAmelCase_ : Union[str, Any] = mixed_qkv[:qkv_dim]
UpperCAmelCase_ : Any = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase_ : str = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase_ : Any = query_layer
UpperCAmelCase_ : Union[str, Any] = key_layer
UpperCAmelCase_ : List[Any] = value_layer
else:
UpperCAmelCase_ : List[Any] = value
return model_state_dict
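# Hedged illustration of the qkv split above (toy sizes, not real CLAP weights):
# a fused in-projection of shape (3 * dim, dim) is cut into three equal blocks
# for query, key and value, and concatenating them recovers the original.
#
# mixed_qkv = torch.arange(48.0).reshape(12, 4)   # (3 * dim, dim) with dim = 4
# q, k, v = mixed_qkv[:4], mixed_qkv[4:8], mixed_qkv[8:]
# torch.equal(torch.cat([q, k, v]), mixed_qkv)    # True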
def snake_case ( A__ ,A__ ,A__ ,A__=False ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = init_clap(A__ ,enable_fusion=A__ )
clap_model.eval()
UpperCAmelCase_ : Tuple = clap_model.state_dict()
UpperCAmelCase_ : Optional[int] = rename_state_dict(A__ )
UpperCAmelCase_ : Optional[Any] = ClapConfig()
UpperCAmelCase_ : str = enable_fusion
UpperCAmelCase_ : Any = ClapModel(A__ )
# ignore the spectrogram embedding layer
model.load_state_dict(A__ ,strict=A__ )
model.save_pretrained(A__ )
transformers_config.save_pretrained(A__ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
lowerCamelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 95 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
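# The helper above normalizes the three accepted layouts into list-of-videos,
# where each video is a list of frames:
#   [[img, img], [img]] -> returned unchanged (already batched videos)
#   [img, img]          -> [[img, img]]       (a single video)
#   img                 -> [[img]]            (a single frame)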
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
        if do_resize and (size is None or resample is None):  # parentheses added so the check matches the error message below
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__lowerCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] ) -> Any:
for attribute in key.split(""".""" ):
        __magic_name__: Union[str, Any] = getattr(__UpperCAmelCase , attribute )  # walk one dotted-key component deeper
if weight_type is not None:
__magic_name__: List[str] = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
__magic_name__: str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__magic_name__: Tuple = value
elif weight_type == "weight_g":
__magic_name__: Dict = value
elif weight_type == "weight_v":
__magic_name__: str = value
elif weight_type == "bias":
__magic_name__: Dict = value
elif weight_type == "running_mean":
__magic_name__: List[Any] = value
elif weight_type == "running_var":
__magic_name__: int = value
elif weight_type == "num_batches_tracked":
__magic_name__: Any = value
elif weight_type == "inv_freq":
__magic_name__: List[Any] = value
else:
__magic_name__: List[Any] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Any:
__magic_name__: Optional[Any] = []
__magic_name__: Optional[int] = fairseq_model.state_dict()
__magic_name__: Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__magic_name__: int = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__magic_name__: Dict = True
else:
for key, mapped_key in MAPPING.items():
__magic_name__: str = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__magic_name__: List[str] = True
if "*" in mapped_key:
__magic_name__: List[str] = name.split(__UpperCAmelCase )[0].split(""".""" )[-2]
__magic_name__: List[str] = mapped_key.replace("""*""" , __UpperCAmelCase )
if "pos_bias_u" in name:
__magic_name__: Optional[Any] = None
elif "pos_bias_v" in name:
__magic_name__: Tuple = None
elif "weight_g" in name:
__magic_name__: str = """weight_g"""
elif "weight_v" in name:
__magic_name__: Union[str, Any] = """weight_v"""
elif "bias" in name:
__magic_name__: Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__magic_name__: int = """weight"""
elif "running_mean" in name:
__magic_name__: str = """running_mean"""
elif "inv_freq" in name:
__magic_name__: Union[str, Any] = """inv_freq"""
elif "running_var" in name:
__magic_name__: Union[str, Any] = """running_var"""
elif "num_batches_tracked" in name:
__magic_name__: Tuple = """num_batches_tracked"""
else:
__magic_name__: Dict = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> List[Any]:
__magic_name__: str = full_name.split("""conv_layers.""" )[-1]
__magic_name__: Dict = name.split(""".""" )
__magic_name__: Dict = int(items[0] )
__magic_name__: Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__magic_name__: Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__magic_name__: str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__magic_name__: int = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__magic_name__: Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Dict=True ) -> Optional[Any]:
if config_path is not None:
__magic_name__: int = WavaVecaConformerConfig.from_pretrained(__UpperCAmelCase , hidden_act="""swish""" )
else:
__magic_name__: str = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__magic_name__: str = """rotary"""
if is_finetuned:
if dict_path:
__magic_name__: Any = Dictionary.load(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__magic_name__: List[str] = target_dict.pad_index
__magic_name__: Any = target_dict.bos_index
__magic_name__: Optional[int] = target_dict.eos_index
__magic_name__: Tuple = len(target_dict.symbols )
__magic_name__: List[Any] = os.path.join(__UpperCAmelCase , """vocab.json""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__magic_name__: List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
__magic_name__: str = 0
__magic_name__: Optional[Any] = 1
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Tuple = WavaVecaCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , )
__magic_name__: int = True if config.feat_extract_norm == """layer""" else False
__magic_name__: int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
__magic_name__: Optional[Any] = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
__magic_name__: Optional[int] = WavaVecaConformerForCTC(__UpperCAmelCase )
else:
__magic_name__: Dict = WavaVecaConformerForPreTraining(__UpperCAmelCase )
if is_finetuned:
__magic_name__, __magic_name__, __magic_name__: Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__magic_name__: Tuple = argparse.Namespace(task="""audio_pretraining""" )
__magic_name__: Any = fairseq.tasks.setup_task(__UpperCAmelCase )
__magic_name__, __magic_name__, __magic_name__: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__UpperCAmelCase )
__magic_name__: Tuple = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__lowerCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
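# A hypothetical invocation of the converter above (the script filename and
# all paths are placeholders, not values taken from the original document):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/hf_output \
#       --not_finetuned
#
# Without --not_finetuned the checkpoint is treated as fine-tuned for CTC, and
# --dict_path should then point at the fairseq dictionary used for fine-tuning.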
| 96 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
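# Minimal sketch of the per-batch serialization the writer above relies on:
# with orient="records" and lines=True, pandas emits one JSON object per line,
# and the writer appends a trailing newline when pandas leaves it off.
import pandas as pd

_df = pd.DataFrame({"a": [1, 2]})
_json_str = _df.to_json(path_or_buf=None, orient="records", lines=True)
if not _json_str.endswith("\n"):
    _json_str += "\n"
print(_json_str)  # {"a":1}\n{"a":2}\n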
| 71 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Any = ['pixel_values']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = size if size is not None else {'''shortest_edge''': 2_5_6}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase_ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> np.ndarray:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> str:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
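# Hedged numpy illustration of the transform order `preprocess` applies above
# (resize -> center_crop -> rescale -> normalize -> channel reordering); the
# 1/255 rescale factor and the 0.5 mean/std mirror the IMAGENET_STANDARD_*
# defaults this processor falls back to.
import numpy as np

_image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
_image = _image * (1 / 255)               # rescale to [0, 1]
_image = (_image - 0.5) / 0.5             # normalize to roughly [-1, 1]
_image = np.transpose(_image, (2, 0, 1))  # HWC -> CHW (ChannelDimension.FIRST)
print(_image.shape)                       # (3, 224, 224)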
| 97 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( lowercase : Tuple, lowercase : Optional[int], lowercase : Any=None, lowercase : Optional[Any]=None, lowercase : Dict=None, lowercase : List[str]=None, lowercase : Any=None, lowercase : Tuple=None, ) -> Tuple:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = np.where(input_ids != config.pad_token_id, 1, 0 )
if decoder_attention_mask is None:
_UpperCamelCase = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
if head_mask is None:
_UpperCamelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Tuple=99 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Dict=32 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Tuple=0 , lowerCAmelCase__ : List[Any]=0.02 , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = initializer_range
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_UpperCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , )
_UpperCamelCase = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxBlenderbotHeadTests ( unittest.TestCase ):
"""simple docstring"""
_snake_case : List[Any] = 9_9
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_config_and_data()
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
_UpperCamelCase = lm_model(input_ids=lowerCAmelCase__ )
_UpperCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
_UpperCamelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_UpperCamelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_UpperCamelCase = lm_model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
_UpperCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCamelCase = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
_UpperCamelCase = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
"""simple docstring"""
_snake_case : Dict = True
_snake_case : str = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case : Tuple = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = FlaxBlenderbotModelTester(self )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Optional[Any] ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCamelCase = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCamelCase = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
_UpperCamelCase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowerCAmelCase__ )
_UpperCamelCase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
_UpperCamelCase = ['''Sam''']
_UpperCamelCase = tokenizer(lowerCAmelCase__ , return_tensors='''jax''' )
_UpperCamelCase = model.generate(**lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = '''Sam is a great name. It means "sun" in Gaelic.'''
_UpperCamelCase = tokenizer.batch_decode(lowerCAmelCase__ , **lowerCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
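# Hedged numpy stand-in for the `shift_tokens_right` helper the tests above
# exercise (the real implementation is imported from modeling_flax_blenderbot):
# tokens move one position to the right, the decoder start token fills column 0,
# and loss-masking -100 values become padding.
import numpy as np

def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

print(_shift_tokens_right_sketch(np.array([[5, 6, 2]]), 1, 2))  # [[2 5 6]]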
| 98 |
'''simple docstring'''
def a__ ( equation1: list[int] , equation2: list[int] ) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError("Please enter a valid equation." )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero." )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)" )
        else:
            raise ValueError("No solution. (Inconsistent system)" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
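# Usage sketch for the solver above: the system x + 2y = 3, 4x + 5y = 6 has
# the unique solution x = -1, y = 2 under Cramer's rule.
if __name__ == "__main__":
    assert a__([1, 2, 3], [4, 5, 6]) == (-1.0, 2.0)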
| 71 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(lowerCAmelCase__ ):
__a = {}
state_dict.pop("""pixel_mean""" , lowerCAmelCase__ )
state_dict.pop("""pixel_std""" , lowerCAmelCase__ )
__a = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__a = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(2 ) )
if layer_nb == 0:
__a = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
__a = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
__a = key.replace("""layers.2""" , """proj_out""" )
__a = value
__a = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def convert_sam_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="ybelkada/segment-anything" ):
__a = hf_hub_download(lowerCAmelCase__ , f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
__a = SamConfig()
elif "sam_vit_l" in model_name:
__a = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__a = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
__a = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__a = SamConfig(
vision_config=lowerCAmelCase__ , )
__a = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
__a = replace_keys(lowerCAmelCase__ )
__a = SamImageProcessor()
__a = SamProcessor(image_processor=lowerCAmelCase__ )
__a = SamModel(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
__a = hf_model.to("""cuda""" )
__a = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__a = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
__a = [[[400, 650]]]
__a = [[1]]
__a = processor(images=np.array(lowerCAmelCase__ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a = hf_model(**lowerCAmelCase__ )
__a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
__a = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a = hf_model(**lowerCAmelCase__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
__a = ((75, 275, 1_725, 850),)
__a = processor(images=np.array(lowerCAmelCase__ ) , input_boxes=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a = hf_model(**lowerCAmelCase__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
__a = [[[400, 650], [800, 650]]]
__a = [[1, 1]]
__a = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
__a = hf_model(**lowerCAmelCase__ )
__a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
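# A hypothetical command line for the converter above (the script filename and
# output folder are placeholders); note the script moves the model to "cuda",
# so it assumes a GPU is available:
#
#   python convert_sam_checkpoint.py \
#       --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base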
| 99 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization( data: list , ndigits: int = 3 ) -> list:
    """simple docstring"""
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
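# Quick check of the two transforms above: min-max normalization maps the data
# onto [0, 1], while standardization yields mean ~0 and sample standard
# deviation ~1 (values below are rounded to 3 digits).
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # [-1.162, -0.387, 0.387, 1.162]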
| 71 | 0 |
import logging
from transformers import PretrainedConfig
_A : Optional[Any] = logging.getLogger(__name__)
_A : List[Any] = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : Tuple = """bertabs"""
def __init__( self , A_=3_05_22 , A_=5_12 , A_=6 , A_=5_12 , A_=8 , A_=5_12 , A_=0.2 , A_=6 , A_=7_68 , A_=8 , A_=20_48 , A_=0.2 , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = max_pos
SCREAMING_SNAKE_CASE__ = enc_layers
SCREAMING_SNAKE_CASE__ = enc_hidden_size
SCREAMING_SNAKE_CASE__ = enc_heads
SCREAMING_SNAKE_CASE__ = enc_ff_size
SCREAMING_SNAKE_CASE__ = enc_dropout
SCREAMING_SNAKE_CASE__ = dec_layers
SCREAMING_SNAKE_CASE__ = dec_hidden_size
SCREAMING_SNAKE_CASE__ = dec_heads
SCREAMING_SNAKE_CASE__ = dec_ff_size
SCREAMING_SNAKE_CASE__ = dec_dropout
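# Minimal instantiation sketch for the config class above, kept under its
# obfuscated name `__snake_case`; every field falls back to the defaults
# declared in `__init__`.
_cfg = __snake_case()
print(_cfg.vocab_size, _cfg.enc_layers, _cfg.dec_hidden_size)  # 30522 6 768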
| 100 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def get_dataloaders( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def training_function( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
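# Standalone sketch of the decorator behavior used above, with a toy stand-in
# for the training loop: `find_executable_batch_size` halves `batch_size` and
# re-invokes the wrapped function whenever it raises a recognized out-of-memory
# error, so the probe below settles at 16.
from accelerate.utils import find_executable_batch_size as _febs

@_febs(starting_batch_size=128)
def _probe(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts GPU memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(_probe())  # 16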
| 71 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    output = {}
    output['input_ids'] = tokenizer(example['content'], truncation=False )['input_ids']
    # characters-per-token ratio (key name reconstructed from the script's intent)
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
lowerCAmelCase__ : Optional[int] =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase__ : List[str] =parser.parse_args()
if args.num_workers is None:
lowerCAmelCase__ : int =multiprocessing.cpu_count()
lowerCAmelCase__ : str =AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase__ : Dict =time.time()
lowerCAmelCase__ : Any =load_dataset(args.dataset_name, split='train')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
lowerCAmelCase__ : Optional[int] =time.time()
lowerCAmelCase__ : int =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
lowerCAmelCase__ : Union[str, Any] =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 101 |
'''simple docstring'''
from __future__ import annotations
def a__ ( n: int ) -> list[int]:
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
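# Usage sketch: 360 = 2**3 * 3**2 * 5, so the function above returns the
# prime factors in non-decreasing order.
assert a__(360) == [2, 2, 2, 3, 3, 5]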
| 71 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__ : Union[str, Any] = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__magic_name__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
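# To make the GLEU description above concrete, here is a hand-rolled sentence-level
# sketch. The real metric aggregates counts at the corpus level via nltk's corpus_gleu;
# this simplification only shows the min(precision, recall) idea over 1- to 4-grams.
from collections import Counter


def _ngram_counts(tokens, lo=1, hi=4):
    return Counter(tuple(tokens[i : i + n]) for n in range(lo, hi + 1) for i in range(len(tokens) - n + 1))


def sentence_gleu_sketch(hyp, ref, lo=1, hi=4):
    h, r = _ngram_counts(hyp, lo, hi), _ngram_counts(ref, lo, hi)
    matches = sum((h & r).values())  # overlapping n-gram counts
    precision = matches / max(sum(h.values()), 1)
    recall = matches / max(sum(r.values()), 1)
    return min(precision, recall)


print(sentence_gleu_sketch("the cat sat".split(), "the cat sat".split()))  # 1.0 on an exact match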
| 71 | 0 |
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    # Pell-style recurrence: each pass of the loop jumps to the next integer
    # (numerator, denominator) pair satisfying the underlying constraint.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(F"{solution() = }")
| 103 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """Logs and saves metrics for a given split."""
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F'''  {key} = {metrics[key]}''' )
    save_json(metrics, os.path.join(output_dir, F'''{split}_results.json''' ) )


def main():
    """Fine-tunes (and optionally evaluates) a seq2seq model via Seq2SeqTrainer."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
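# A hypothetical invocation, using only flags defined by the dataclasses above plus
# standard Trainer arguments; the script name, model name, and paths are placeholders:
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm --task summarization \
#       --output_dir ./outputs --do_train --do_eval \
#       --n_train 1000 --n_val 200 --freeze_embeds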
| 71 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers with n unique prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
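# Known Project Euler 47 checkpoints double as quick regression checks (the n = 4
# default takes noticeably longer to run):
assert solution(2) == 14   # 14 = 2*7 and 15 = 3*5
assert solution(3) == 644  # 644, 645 and 646 each have three distinct prime factors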
| 104 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past, output_from_no_past, rtol=1E-3 )
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12" )

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past )
        if not getattr(self._config, "pad_token_id", None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1E-3

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
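    # As a worked example of the shape arithmetic above, take a hypothetical config
    # with hidden_size=64 and n_head=8 (the defaults in this file), batch=2, seqlen=5:
    #   head_dim               = 64 // 8 = 8
    #   past_key_values_length = 5 + 2   = 7
    #   key tensors            : (2 * 8, 8, 7) -> (16, 8, 7)   # sequence on the last axis
    #   value tensors          : (2 * 8, 7, 8) -> (16, 7, 8)   # sequence on axis 1
    #   attention_mask after torch.cat : (2, 5 + 7) = (2, 12)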
| 105 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
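# A quick numeric check of the density above: at the mean of a standard normal the
# value is 1/sqrt(2*pi).
print(gaussian(0))        # ~0.3989422804014327
print(gaussian(1, mu=1))  # same value, just a shifted mean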
| 71 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        # NOTE: the tester deliberately overrides the constructor arguments with
        # hardcoded values below.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1 ), (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1 ), (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1 ), (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True )
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ), num_out )
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ), expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions ), self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length )
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2, 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict, model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states, False )
            check_encoder_attentions_output(outputs )

            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict, model_class ) )
                self.assertEqual(config.output_hidden_states, False )
                check_decoder_attentions_output(outputs )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict, model_class ) )
            self.assertEqual(config.output_hidden_states, False )
            check_encoder_attentions_output(outputs )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict, model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs ) )
            self.assertEqual(model.config.output_hidden_states, True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4 )
| 106 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states )
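# A toy illustration of the mixing step in forward(): two per-condition residuals are
# blended by mix_ratio and added back onto the input. Shapes here are arbitrary.
import torch

demo_input = torch.randn(1, 4, 8)
demo_residuals = [torch.randn(1, 4, 8) - demo_input for _ in range(2)]  # encoded_state - input_states
demo_mix_ratio = 0.5
demo_output = demo_residuals[0] * demo_mix_ratio + demo_residuals[1] * (1 - demo_mix_ratio) + demo_input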
| 71 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname )

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'], min_bleu_score )
| 107 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8" ) as f:
        results = json.load(f )
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old" , None )
            dif_val = metric_vals.get("diff" , None )
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else "None"
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>" )
    with open(output_md_file , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
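# For a feel of the output, an input file containing
#   {"sub/epoch_1.json": {"loss": {"new": 0.51, "old": 0.62, "diff": -0.11}}}
# renders roughly as:
#
#   <details>
#   <summary>Show updated benchmarks!</summary>
#
#   ### Benchmark: epoch_1.json
#   | metric | loss |
#   |--------|---|
#   | new / old (diff) | 0.510000 / 0.620000 (-0.110000) |
#   </details>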
| 71 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf" ),
            "variance_type": None,
        }
        config.update(**kwargs )
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps", None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output, t, sample, **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
self.check_over_configs(variance_type="""learned_range""" )
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3
    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 108 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,  # the expected-encoding dict built above
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
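
# A short usage sketch (editor's example, not part of the test suite): loading the
# slow tokenizer from the hub checkpoint exercised above; network access is assumed.
if __name__ == "__main__":
    tok = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    encoded = tok("I was born in 92000, and this is falsé.")
    print(encoded["input_ids"])  # ids wrapped in the CLS/SEP special tokens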
| 71 | 0 |
'''simple docstring'''
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value``, or its derivative when ``deriv`` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single sigmoid unit toward ``expected`` and return its final output."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
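
# A hand-worked check (editor's example): each update step computes
#   y = sigmoid(0.02 * w),  error = expected / 100 - y,  delta = error * y * (1 - y),
#   w += 0.02 * delta
# which is the classic delta rule for a single sigmoid unit. For expected = 50 the
# fixed point is w = 0, so the result should land close to 50.
if __name__ == "__main__":
    random.seed(0)  # make the randomly drawn starting weight reproducible
    print(forward_propagation(50, 100_000))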
| 109 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the most significant set bit of ``number``
    (equivalent to ``number.bit_length()`` for non-negative inputs)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
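
# A quick cross-check (editor's example): for non-negative integers the result matches
# int.bit_length().
if __name__ == "__main__":
    for n in (0, 1, 2, 5, 17, 1 << 20):
        assert get_highest_set_bit_position(n) == n.bit_length()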
| 71 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
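

# A minimal non-interactive sketch (editor's example): the questionnaire above boils
# down to constructing this object directly. Field values are illustrative, and the
# keyword names mirror the return statement above.
def _example_static_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )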
| 110 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """Number of ways to choose ``k`` items from ``n`` when order does not matter:
    n! / (k! * (n - k)!)."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
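
# A quick sanity check (editor's example): the helper agrees with math.comb from the
# standard library.
if __name__ == "__main__":
    from math import comb

    assert combinations(52, 5) == comb(52, 5) == 2_598_960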
| 71 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether the 3-digit substrings of the digit tuple ``num`` satisfy
    Project Euler 43's divisibility properties (by 2, 3, 5, 7, 11, 13 and 17)."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
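
# A worked instance (editor's example): 1406357289 is one of the qualifying pandigital
# numbers; for instance its substring 357 is divisible by 7 and 289 by 17.
if __name__ == "__main__":
    assert is_substring_divisible(tuple(int(digit) for digit in "1406357289"))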
| 422 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_guided_v2v(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
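
# A short usage sketch (editor's example, not part of the tests): the slow test above
# in miniature. A CUDA device and network access to the Hub are assumed, and the
# frame count and resolution are arbitrary demo choices.
if __name__ == "__main__":
    pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
    pipe.enable_model_cpu_offload()
    video = torch.randn((1, 8, 3, 320, 576)).to("cuda")
    frames = pipe("Spiderman is surfing", video=video, num_inference_steps=3, output_type="pt").frames
    print(frames.shape)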
| 71 | 0 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element, scan the rest of the list. O(n^2) time."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the brute force, but using slicing and enumerate. Still O(n^2) time."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: each element is pushed and popped at most once,
    so the whole scan runs in O(n) time with O(n) extra space."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
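
# A tiny demonstration (editor's example) of the linear-time behaviour: scanning
# [2, 1, 3] right-to-left, the stack holds [3], then [3, 1]; element 2 pops the 1
# and reads 3, so every element is pushed and popped at most once.
if __name__ == "__main__":
    assert next_greatest_element([2, 1, 3]) == [3, 3, -1]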
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 330 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
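

# A short usage sketch (editor's example, not part of the test suite): the same
# inspection helpers work outside pytest; network access to the Hub is assumed.
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # e.g. ['plain_text']
    print(get_dataset_split_names("squad", "plain_text"))  # ['train', 'validation']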
| 71 | 0 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
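
# A short usage sketch (editor's example, not part of the tests): with no explicit
# framework argument the resolution order demonstrated above applies. Downloading
# the small hub checkpoint is assumed to succeed.
if __name__ == "__main__":
    framework = FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER)
    print(framework)  # "pt" when torch is installed, otherwise "tf"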
| 5 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
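
# A short usage sketch (editor's example, not part of the tests): build the same tiny
# processor outside unittest by reusing the fixture-writing setUp above.
if __name__ == "__main__":
    case = CLIPProcessorTest("test_processor")
    case.setUp()
    processor = CLIPProcessor(tokenizer=case.get_tokenizer(), image_processor=case.get_image_processor())
    batch = processor(text="lower newer", images=case.prepare_image_inputs())
    print(list(batch.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']
    case.tearDown()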
| 71 | 0 |
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Sum of an arithmetic series: (n / 2) * (2a + (n - 1)d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
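
# A closed-form cross-check (editor's example): the arithmetic-series sum
# a + (a + d) + ... + (a + (n - 1) * d) equals n * a + d * n * (n - 1) / 2.
if __name__ == "__main__":
    first, diff, terms = 3.0, 2.0, 7
    assert sum_of_series(first, diff, terms) == terms * first + diff * terms * (terms - 1) / 2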
| 303 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : List[str] = output.audios[0]  # first generated audio (audio_a)
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : List[Any] = output.audios[0]  # second generated audio (audio_b)
        assert np.abs(audio_a - audio_b ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : Dict = output.audios[0]  # first generated audio (audio_a)
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
        UpperCAmelCase_ : Any = output.audios[0]  # second generated audio (audio_b)
        assert np.abs(audio_a - audio_b ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
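# Editorial note: the expected_slice arrays in these tests are golden values captured from a
# reference run; comparing only a short slice with np.abs(...).max() < tol gives a cheap
# regression check that tolerates platform-level floating-point noise.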
| 71 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 651 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
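# Editorial note: registering a _LazyModule in sys.modules defers the heavy torch imports
# declared above until an attribute such as GPTNeoXJapaneseModel is first accessed.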
| 71 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
A : List[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
A : List[Any] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
A : Union[str, Any] = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def UpperCamelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def UpperCamelCase_ ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="uniform_average" , __UpperCamelCase=True ):
_lowercase = mean_squared_error(
_snake_case , _snake_case , sample_weight=_snake_case , multioutput=_snake_case , squared=_snake_case )
return {"mse": mse} | 287 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ) -> set[int]:
    """simple docstring"""
    queue : list[list] = []
    # for each node and its adjacency list, push the node and its rank onto the queue
    # using the heapq module the queue is filled like a priority queue
    # heapq implements a min priority queue, so -1 * len(v) is used to get max-first order
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a_ = ["speech"]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
requires_backends(self , ["""speech"""] )
class _UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a_ = ["speech"]
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
requires_backends(self , ["""speech"""] )
| 577 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
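# Editorial note: make_batched normalizes every accepted layout to List[List[frame]] --
# a batch of videos passes through unchanged, a single video becomes [video], and a lone
# image becomes [[image]].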
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class __lowercase ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase = "t5"
UpperCamelCase = ["past_key_values"]
UpperCamelCase = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : List[str] , __lowerCamelCase : Any=3_2_1_2_8 , __lowerCamelCase : Optional[Any]=5_1_2 , __lowerCamelCase : str=6_4 , __lowerCamelCase : Any=2_0_4_8 , __lowerCamelCase : Any=6 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=8 , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : List[str]=1_2_8 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Union[str, Any]=1e-6 , __lowerCamelCase : Optional[Any]=1.0 , __lowerCamelCase : Optional[int]="relu" , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Union[str, Any]=1 , **__lowerCamelCase : List[Any] , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = d_kv
UpperCAmelCase = d_ff
UpperCAmelCase = num_layers
UpperCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase = num_heads
UpperCAmelCase = relative_attention_num_buckets
UpperCAmelCase = relative_attention_max_distance
UpperCAmelCase = dropout_rate
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_factor
UpperCAmelCase = feed_forward_proj
UpperCAmelCase = use_cache
UpperCAmelCase = self.feed_forward_proj.split("""-""" )
UpperCAmelCase = act_info[-1]
UpperCAmelCase = act_info[0] == "gated"
if len(_snake_case ) > 1 and act_info[0] != "gated" or len(_snake_case ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase = "gelu_new"
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , **_snake_case , )
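    # Editorial example: feed_forward_proj="gated-gelu" yields is_gated_act=True and, via the
    # backwards-compatibility branch, dense_act_fn="gelu_new"; feed_forward_proj="relu" yields
    # is_gated_act=False and dense_act_fn="relu".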
class __lowercase ( __SCREAMING_SNAKE_CASE ):
@property
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCAmelCase = "past_encoder_sequence + sequence"
UpperCAmelCase = {0: "batch"}
UpperCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction="""inputs""" )
return common_inputs
@property
def _lowercase ( self : str ) -> int:
"""simple docstring"""
return 1_3
| 377 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
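# Editorial sketch (hedged, not part of this module): this writer backs `Dataset.to_json`,
# which defaults to JSON Lines when orient="records", e.g.
# from datasets import Dataset
# Dataset.from_dict({"a": [1, 2]}).to_json("out.jsonl")  # one JSON object per line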
| 71 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
__SCREAMING_SNAKE_CASE = {
"do_resize": True,
"size": {"height": 1_8, "width": 1_8},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , _snake_case)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(_snake_case , _snake_case)
def snake_case_ ( self , **lowerCAmelCase__):
return BertTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def snake_case_ ( self , **lowerCAmelCase__):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_snake_case)
def snake_case_ ( self):
shutil.rmtree(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1)) for x in image_inputs]
return image_inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
processor.save_pretrained(self.tmpdirname)
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0)
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_snake_case , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_snake_case , return_tensors="""np""")
__SCREAMING_SNAKE_CASE = processor(images=_snake_case , return_tensors="""np""")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = processor(text=_snake_case)
__SCREAMING_SNAKE_CASE = tokenizer(_snake_case)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_snake_case , images=_snake_case)
self.assertListEqual(list(inputs.keys()) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""])
# test if it raises when no input is passed
with self.assertRaises(_snake_case):
processor()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(_snake_case)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case)
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_snake_case , images=_snake_case)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 155 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 0 |
'''simple docstring'''
def solution( n : int = 10 ) -> str:
    '''simple docstring'''
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus : int = 10**n
    number : int = 2_84_33 * (pow(2 , 7_83_04_57 , modulus )) + 1
    return str(number % modulus )
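# Editorial note: the three-argument pow(2, 7_83_04_57, modulus) performs modular
# exponentiation, so only the last n digits of 2**7830457 are ever materialized.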
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(F'{solution(10) = }')
| 329 |
'''simple docstring'''
def cramers_rule_2x2( equation_a : list[int] , equation_b : list[int] ) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation_a ) == len(equation_b ) == 3:
        raise ValueError("Please enter a valid equation." )
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero." )
    # Extract the coefficients: each equation is [a, b, c] for a*x + b*y = c
    aa , ba , ca = equation_a
    ab , bb , cb = equation_b
    # Calculate the determinants of the matrices
    determinant = aa * bb - ab * ba
    determinant_x = ca * bb - cb * ba
    determinant_y = aa * cb - ab * ca
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)" )
        else:
            raise ValueError("No solution. (Inconsistent system)" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: x = y = 0
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
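if __name__ == "__main__":
    # Hedged example (editorial addition, equations chosen here): 2x + 3y = 7 and
    # x - y = -1 have the unique solution (x, y) = (0.8, 1.8).
    print(cramers_rule_2x2([2, 3, 7] , [1, -1, -1] ))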
| 71 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__a : str = logging.get_logger(__name__)
# General docstring
__a : int = "PoolFormerConfig"
# Base docstring
__a : Union[str, Any] = "sail/poolformer_s12"
__a : Any = [1, 512, 7, 7]
# Image classification docstring
__a : Union[str, Any] = "sail/poolformer_s12"
__a : List[str] = "tabby, tabby cat"
__a : int = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _SCREAMING_SNAKE_CASE ( __lowercase : Optional[Any] , __lowercase : float = 0.0 , __lowercase : bool = False ) -> Tuple:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
__A = 1 - drop_prob
__A = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__A = keep_prob + torch.rand(_SCREAMING_SNAKE_CASE , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
__A = input.div(_SCREAMING_SNAKE_CASE ) * random_tensor
return output
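# Editorial note: with probability drop_prob a sample's residual branch is zeroed during
# training, and survivors are rescaled by 1/keep_prob via input.div(keep_prob), keeping the
# expected activation equal to evaluation mode (stochastic depth, Huang et al., 2016).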
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : str = None ):
"""simple docstring"""
super().__init__()
__A = drop_prob
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : str ):
"""simple docstring"""
return drop_path(_snake_case , self.drop_prob , self.training )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return "p={}".format(self.drop_prob )
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
super().__init__()
__A = patch_size if isinstance(_snake_case , collections.abc.Iterable ) else (patch_size, patch_size)
__A = stride if isinstance(_snake_case , collections.abc.Iterable ) else (stride, stride)
__A = padding if isinstance(_snake_case , collections.abc.Iterable ) else (padding, padding)
__A = nn.Convad(_snake_case , _snake_case , kernel_size=_snake_case , stride=_snake_case , padding=_snake_case )
__A = norm_layer(_snake_case ) if norm_layer else nn.Identity()
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
__A = self.projection(_snake_case )
__A = self.norm(_snake_case )
return embeddings
class __lowercase ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : int , **UpperCamelCase_ : str ):
"""simple docstring"""
super().__init__(1 , _snake_case , **_snake_case )
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__()
__A = nn.AvgPoolad(_snake_case , stride=1 , padding=pool_size // 2 , count_include_pad=_snake_case )
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : Union[str, Any] ):
"""simple docstring"""
return self.pool(_snake_case ) - hidden_states
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__A = nn.Convad(_snake_case , _snake_case , 1 )
__A = nn.Convad(_snake_case , _snake_case , 1 )
__A = PoolFormerDropPath(_snake_case )
if isinstance(config.hidden_act , _snake_case ):
__A = ACTaFN[config.hidden_act]
else:
__A = config.hidden_act
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : Any ):
"""simple docstring"""
__A = self.conva(_snake_case )
__A = self.act_fn(_snake_case )
__A = self.drop(_snake_case )
__A = self.conva(_snake_case )
__A = self.drop(_snake_case )
return hidden_states
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__()
__A = PoolFormerPooling(_snake_case )
__A = PoolFormerOutput(_snake_case , _snake_case , _snake_case , _snake_case )
__A = PoolFormerGroupNorm(_snake_case )
__A = PoolFormerGroupNorm(_snake_case )
# Useful for training neural nets
__A = PoolFormerDropPath(_snake_case ) if drop_path > 0.0 else nn.Identity()
__A = config.use_layer_scale
if config.use_layer_scale:
__A = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) , requires_grad=_snake_case )
__A = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) , requires_grad=_snake_case )
def lowerCAmelCase_ ( self : Optional[Any] , UpperCamelCase_ : Dict ):
"""simple docstring"""
if self.use_layer_scale:
__A = self.pooling(self.before_norm(_snake_case ) )
__A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__A = hidden_states + self.drop_path(_snake_case )
__A = ()
__A = self.output(self.after_norm(_snake_case ) )
__A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__A = hidden_states + self.drop_path(_snake_case )
__A = (output,) + outputs
return outputs
else:
__A = self.drop_path(self.pooling(self.before_norm(_snake_case ) ) )
# First residual connection
__A = pooling_output + hidden_states
__A = ()
# Second residual connection inside the PoolFormerOutput block
__A = self.drop_path(self.output(self.after_norm(_snake_case ) ) )
__A = hidden_states + layer_output
__A = (output,) + outputs
return outputs
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : int ):
"""simple docstring"""
super().__init__()
__A = config
# stochastic depth decay rule
__A = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__A = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__A = nn.ModuleList(_snake_case )
# Transformer blocks
__A = []
__A = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__A = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_snake_case , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_snake_case ) )
__A = nn.ModuleList(_snake_case )
def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Optional[int]=True ):
"""simple docstring"""
__A = () if output_hidden_states else None
__A = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__A = layers
# Get patch embeddings from hidden_states
__A = embedding_layer(_snake_case )
# Send the embeddings through the blocks
for _, blk in enumerate(_snake_case ):
__A = blk(_snake_case )
__A = layer_outputs[0]
if output_hidden_states:
__A = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case )
class __lowercase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PoolFormerConfig
SCREAMING_SNAKE_CASE = "poolformer"
SCREAMING_SNAKE_CASE = "pixel_values"
SCREAMING_SNAKE_CASE = True
def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : Dict ):
"""simple docstring"""
if isinstance(_snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_snake_case , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Dict=False ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
__A = value
__a : List[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__a : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class __lowercase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : List[Any] ):
"""simple docstring"""
super().__init__(_snake_case )
__A = config
__A = PoolFormerEncoder(_snake_case )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self : Dict , UpperCamelCase_ : Dict = None , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Optional[int] = None , ):
"""simple docstring"""
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
__A = self.encoder(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , )
__A = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_snake_case , hidden_states=encoder_outputs.hidden_states , )
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__()
__A = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
__A = self.dense(_snake_case )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , __SCREAMING_SNAKE_CASE , )
class __lowercase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Any ):
"""simple docstring"""
super().__init__(_snake_case )
__A = config.num_labels
__A = PoolFormerModel(_snake_case )
# Final norm
__A = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__A = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[Any] = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : List[str] = None , ):
"""simple docstring"""
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.poolformer(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , )
__A = outputs[0]
__A = self.classifier(self.norm(_snake_case ).mean([-2, -1] ) )
__A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__A = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__A = "single_label_classification"
else:
__A = "multi_label_classification"
if self.config.problem_type == "regression":
__A = MSELoss()
if self.num_labels == 1:
__A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__A = loss_fct(_snake_case , _snake_case )
elif self.config.problem_type == "single_label_classification":
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__A = BCEWithLogitsLoss()
__A = loss_fct(_snake_case , _snake_case )
if not return_dict:
__A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states )
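# A minimal, self-contained sketch of the problem_type -> loss dispatch used in the
# classification head above (MSE for regression, cross-entropy for single-label,
# BCE-with-logits for multi-label). `pick_loss` and its arguments are illustrative
# names, not part of the model's API; in the model the logits come from global
# average pooling, i.e. `self.classifier(self.norm(x).mean([-2, -1]))`.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
    if num_labels == 1:
        # regression: one continuous target per example
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        # single-label classification: one class index per example
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # multi-label classification: a float indicator vector per example
    return BCEWithLogitsLoss()(logits, labels.float())

print(pick_loss(torch.randn(4, 3), torch.tensor([0, 2, 1, 1]), num_labels=3))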
| 637 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
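# Quick check of the two rescalings above on a toy sample: min-max maps the
# endpoints to 0 and 1, and the z-scores come out symmetric around 0.
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(normalization(sample))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(sample))  # [-1.265, -0.632, 0.0, 0.632, 1.265]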
| 71 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
__lowercase : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
__lowercase : Optional[Any] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase_ : Any = "lm_head"
lowerCamelCase_ : Tuple = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowerCamelCase_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
lowerCamelCase_ : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase_ : Any = value
elif weight_type == "weight_g":
lowerCamelCase_ : Optional[int] = value
elif weight_type == "weight_v":
lowerCamelCase_ : Dict = value
elif weight_type == "bias":
lowerCamelCase_ : Dict = value
else:
lowerCamelCase_ : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : List[str] = fairseq_model.state_dict()
lowerCamelCase_ : List[str] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ : Dict = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ : int = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ : List[Any] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase_ : Optional[int] = True
if "*" in mapped_key:
lowerCamelCase_ : Tuple = name.split(_SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowerCamelCase_ : Dict = mapped_key.replace('''*''' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowerCamelCase_ : int = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ : List[str] = "weight_v"
elif "bias" in name:
lowerCamelCase_ : List[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ : List[Any] = "weight"
else:
lowerCamelCase_ : Dict = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : str = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ : Optional[int] = name.split('''.''' )
lowerCamelCase_ : Tuple = int(items[0] )
lowerCamelCase_ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase_ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase_ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase_ : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase_ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
lowerCamelCase_ : Optional[int] = UniSpeechConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ : Tuple = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase_ : str = Dictionary.load_from_json(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ : Optional[int] = target_dict.pad_index
lowerCamelCase_ : List[str] = target_dict.bos_index
lowerCamelCase_ : Optional[Any] = target_dict.eos_index
lowerCamelCase_ : int = len(target_dict.symbols )
lowerCamelCase_ : int = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase_ : str = 42
lowerCamelCase_ : str = 43
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ : Tuple = True if config.feat_extract_norm == "layer" else False
lowerCamelCase_ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ : List[str] = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ : str = UniSpeechForCTC(_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ : Any = UniSpeechForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned:
lowerCamelCase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
lowerCamelCase_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase_ : str = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowercase : str = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
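# A minimal sketch of the prefix-remapping idea that drives MAPPING above: a
# fairseq-style key is rewritten to its HF-style counterpart, with "*" standing
# in for the layer index extracted from the original name. `remap_key` is an
# illustrative helper, not part of the conversion script.
def remap_key(name, mapping):
    for src, dst in mapping.items():
        if src in name:
            if "*" in dst:
                layer_index = name.split(src)[0].split(".")[-2]
                dst = dst.replace("*", layer_index)
            return dst
    return None

print(remap_key(
    "encoder.layers.3.self_attn.k_proj.weight",
    {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"},
))  # -> encoder.layers.3.attention.k_proj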
| 422 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
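# A toy sketch of the `find_executable_batch_size` pattern used above: the
# decorated closure is retried with a halved batch size whenever it raises an
# out-of-memory error. This is a stand-in for illustration only; the real
# implementation lives in `accelerate.utils`.
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        def wrapper():
            bs = starting_batch_size
            while bs > 0:
                try:
                    return fn(bs)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise
                    bs //= 2  # shrink and retry
            raise RuntimeError("No executable batch size found.")
        return wrapper
    return decorator

@find_executable_batch_size_sketch(starting_batch_size=64)
def train_sketch(batch_size):
    if batch_size > 16:  # pretend anything above 16 OOMs
        raise RuntimeError("CUDA out of memory")
    return batch_size

print(train_sketch())  # -> 16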
| 71 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Optional[int]:
'''simple docstring'''
if config_name_or_path is None:
_A = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
_A = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_A = question_encoder_name_or_path
_A = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
_A = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_A = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_A = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
_A = gen_config
_A = question_encoder_config
_A = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
_A = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
_A = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
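# Hypothetical invocation of the consolidation script above; the model ids are
# real Hub checkpoints, while the script filename and output path are placeholders:
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_token \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-token-consolidated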
| 330 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of ``n``, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
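# Worked example: 360 = 2**3 * 3**2 * 5, so the returned list repeats each
# prime according to its multiplicity.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]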
| 71 | 0 |
'''simple docstring'''
from torch import nn
def A (act_fn: str):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
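# Quick check of the selector above (requires torch): each alias resolves to
# the matching torch activation module.
print(A("swish"), A("mish"), A("gelu"))  # e.g. SiLU() Mish() GELU()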
| 5 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case (datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case )
}
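# The metric above is a thin wrapper around nltk's corpus_gleu; a minimal direct
# call on toy token lists (requires nltk) looks like this:
from nltk.translate import gleu_score

hypothesis = ["the", "cat", "sat"]
reference = ["the", "cat", "sat", "down"]
print(gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
))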
| 71 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : Dict = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ['PerceiverFeatureExtractor']
A_ : int = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
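# A minimal sketch of the deferred-import machinery behind `_LazyModule` above:
# nothing listed in the import structure is imported until the attribute is
# first touched. `LazyModuleSketch` is an illustrative toy, not the real class.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for module_name, symbols in self._import_structure.items():
            if item in symbols:
                # the import happens here, on first access
                return getattr(importlib.import_module(module_name), item)
        raise AttributeError(item)

lazy = LazyModuleSketch("lazy", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this call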
| 303 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the encoder."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__A : Optional[str] =field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__A : Optional[int] =field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."})
__A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) )
def a__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
check_output_dir(_SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ : Dict = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ : Tuple = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCAmelCase_ : Dict = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ : int = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : List[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCAmelCase_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ : int = train_result.metrics
UpperCAmelCase_ : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
UpperCAmelCase_ : Optional[Any] = data_args.n_val
UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
UpperCAmelCase_ : List[str] = test_output.metrics
UpperCAmelCase_ : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
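# A minimal sketch of the freeze/assert helpers used above (`freeze_params`,
# `assert_all_frozen`): turning off gradients for a module and verifying it.
# The `_sketch` names are illustrative, not the utils module's API; requires torch.
import torch.nn as tnn

def freeze_params_sketch(module: tnn.Module) -> None:
    for p in module.parameters():
        p.requires_grad = False

def assert_all_frozen_sketch(module: tnn.Module) -> None:
    assert all(not p.requires_grad for p in module.parameters()), "unfrozen params remain"

enc = tnn.Linear(4, 4)
freeze_params_sketch(enc)
assert_all_frozen_sketch(enc)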
| 71 | 0 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
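# Worked example: 28 = 1 + 2 + 4 + 7 + 14 is perfect, while 27 is not.
assert perfect(28) and not perfect(27)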
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
__UpperCAmelCase = int(input("""Enter number: """).strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 651 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__A : Dict =(
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Any =True
__A : Dict =False
__A : Dict =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
__A : Optional[int] =["My friends are cool but they eat too many carbs."]
__A : Optional[Any] ="facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase__ ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
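# A minimal numpy sketch of the cache-consistency property checked by
# `check_decoder_model_past_large_inputs` above: decoding a sequence in one
# shot must match decoding it incrementally with a carried-over cache. The
# cumulative sum is a toy stand-in for a causal decoder with past_key_values.
import numpy as np

def decode_full(tokens):
    return np.cumsum(tokens)

def decode_incremental(tokens, cache=0.0):
    outs = []
    for t in tokens:
        cache = cache + t  # reuse state instead of recomputing the prefix
        outs.append(cache)
    return np.array(outs)

tokens = np.array([1.0, 2.0, 3.0])
np.testing.assert_allclose(decode_full(tokens), decode_incremental(tokens), rtol=1e-3)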
| 71 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """Configuration for a MarkupLM model: BERT-style hyperparameters plus XPath embedding sizes."""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 287 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Probability density of the normal distribution N(mu, sigma^2) at x.

    >>> round(gaussian(0), 4)
    0.3989
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system a1*x + b1*y = c1, a2*x + b2*y = c2 with Cramer's rule.
    Each equation is passed as a list of its coefficients [a, b, c].
    """
    # Check that each equation has exactly three coefficients
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
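# Added sanity check (illustrative values, not from the original file):
# x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
if __name__ == "__main__":
    assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)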
| 577 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two Transformer2DModel blocks whose outputs are mixed for dual text conditioning."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
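# Worked example of the mixing step above (derived from the code; values are
# illustrative): with mix_ratio = 0.5 the forward pass computes
#   output = 0.5 * (enc_0 - x) + 0.5 * (enc_1 - x) + x
# where x is the input hidden states and enc_i is the transformer selected by
# transformer_index_for_condition[i] applied to the i-th condition slice.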
| 71 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Polynomial warmup schedule that hands off to `decay_schedule_fn` after `warmup_steps`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
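# Worked example (illustrative values): with initial_learning_rate=5e-5,
# warmup_steps=500 and power=1.0, the schedule returns 5e-5 * (50 / 500) = 5e-6
# at step 50, and defers to decay_schedule_fn(step - 500) once step >= 500.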
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: float = None,
    adam_global_clipnorm: float = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: List[str] = None,
):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
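# A minimal usage sketch (added example; `model` and the step counts are
# assumptions, not from this file):
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )
#   model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")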
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to variables selected by regex patterns."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to the variable named `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several forward passes; call `reset()` after applying them."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
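# A minimal accumulation-loop sketch (added example; `model`, `optimizer`,
# `loss_fn` and `micro_batches` are assumptions, not from this file):
#
#   accumulator = GradientAccumulator()
#   for features, labels in micro_batches:
#       with tf.GradientTape() as tape:
#           loss = loss_fn(labels, model(features))
#       accumulator(tape.gradient(loss, model.trainable_variables))
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()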
| 377 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Renders a benchmark-results JSON file as a collapsible markdown report."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
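# Expected input shape (illustrative, not from the original file):
#   {"benchmarks/foo.json": {"accuracy": {"new": 0.91, "old": 0.90, "diff": 0.01}}}
# renders a "### Benchmark: foo.json" section with a one-row metric table
# showing " 0.910000 / 0.900000 (0.010000)".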
| 71 | 0 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
| 155 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (an XLM-R-style model with a projection)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (a CLIP-style vision transformer)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Bundles an AltCLIPTextConfig and an AltCLIPVisionConfig into a single configuration."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiates an AltCLIPConfig from a text model configuration and a vision model configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 329 |
'''simple docstring'''
def highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of `number`
    (i.e. its bit length).

    >>> highest_set_bit_position(5)  # 0b101
    3
    >>> highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 0 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 637 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """
    Returns C(n, k) = n! / (k! * (n - k)!), the number of ways to choose
    k items from n.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
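# Worked example: combinations(52, 5) = 52! / (5! * 47!) = 2,598,960, matching
# the five-card-hand count printed below.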
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the index of the last occurrence of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the rightmost mismatching text index for the window at
        `current_pos`, or -1 if the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
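# For the inputs above, "AB" occurs in "ABAABA" at indices 0 and 3, so this
# prints: Pattern found in following positions: [0, 3]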
| 422 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = FlaxRegNetModel(config=_snake_case )
_A = model(_snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=_snake_case )
_A = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = self.prepare_config_and_inputs()
_A = config_and_inputs
_A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_snake_case )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ):
_A = model_class(_snake_case )
_A = model(**self._prepare_for_class(_snake_case , _snake_case ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(_snake_case , _snake_case )
_A = model_class(_snake_case )
@jax.jit
def model_jitted(__UpperCAmelCase : int , **__UpperCAmelCase : Optional[int] ):
return model(pixel_values=_snake_case , **_snake_case )
with self.subTest("JIT Enabled" ):
_A = model_jitted(**_snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_A = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """Loads the standard COCO fixture image used by the vision integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_snake_case , return_tensors="np" )
_A = model(**_snake_case )
# verify the logits
_A = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case )
_A = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
| 330 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
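

@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
    ],
)
def test_get_dataset_split_names_positive(path, config_name, expected_splits):
    # Added positive-path sketch (illustrative; requires Hub access): split
    # names can be listed without downloading the dataset itself.
    assert get_dataset_split_names(path, config_name=config_name) == expected_splits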
| 71 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
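

# Added wiring sketch (illustrative; assumes a LightningModule `model` exposing
# an `output_dir` hparam):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(model.hparams.output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )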
| 5 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list holding a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
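

# Added usage sketch (illustrative checkpoint name): the processor bundles the
# image processor and tokenizer behind a single call.
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)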
| 71 | 0 |
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick string-matching automaton built from a list of keywords."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each matched keyword to its start indices."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
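
    # Added usage sketch (illustrative inputs): report each keyword with the
    # start indices at which it occurs in the text.
    auto = Automaton(["what", "hat", "ver", "er"])
    print(auto.search_in("whatever, err ... , wherever"))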
| 303 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 0 |
def apply_table(inp: str, table: list[int]) -> str:
    """Apply a permutation/selection table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: the same rounds with the round keys in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 651 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
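
# Added usage note (illustrative): with the lazy module installed in
# sys.modules, importing a symbol such as GPTNeoXJapaneseConfig only pulls in
# the heavy modeling code once the attribute is actually accessed:
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig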
| 71 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for the minimum vertex cover problem."""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1*len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 0 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of a vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursive backtracking helper: try to color vertex `index` onwards."""
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
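

if __name__ == "__main__":
    # Added usage sketch (illustrative graph): a 4-cycle with the chord 1-3
    # contains a triangle, so 2 colors fail and 3 colors succeed.
    adjacency = [
        [0, 1, 0, 1],
        [1, 0, 1, 1],
        [0, 1, 0, 1],
        [1, 1, 1, 0],
    ]
    print(color(adjacency, 2))  # -> []
    print(color(adjacency, 3))  # -> [0, 1, 0, 2]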
| 577 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize `videos` to a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
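

# Behaviour sketch (illustrative names): a single frame, one video as a list of
# frames, and a batch of videos all normalize to List[List[ImageInput]]:
#
#   make_batched(frame)              -> [[frame]]
#   make_batched([f1, f2])           -> [[f1, f2]]
#   make_batched([[f1, f2], [f3]])   -> [[f1, f2], [f3]]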
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
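

if __name__ == "__main__":
    # Added usage sketch (illustrative parameters; assumes IIRFilter.process
    # from audio_filters.iir_filter): design a 1 kHz low-pass biquad for 48 kHz
    # audio and push a few samples through it.
    lowpass = make_lowpass(1000, 48000)
    print([lowpass.process(sample) for sample in (0.0, 1.0, 1.0, 1.0)])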
| 377 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
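

# Added round-trip sketch (illustrative path; `ds` is any `datasets.Dataset`):
#
#   JsonDatasetWriter(ds, "data.jsonl", num_proc=2).write()
#   reloaded = JsonDatasetReader("data.jsonl").read()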
| 71 | 0 |