import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
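
# A minimal usage sketch of the processor exercised above (assumes network
# access to download google/owlvit-base-patch32; not part of the test suite):
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=["cat", "nasa badge"])
#   inputs["input_ids"].shape  # (2, 16), as asserted in test_processor_with_text_list
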
def get_1s_count(number: int) -> int:
    """Count the set bits in a non-negative integer using Brian Kernighan's method."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # Clearing the lowest set bit jumps straight to the next 1 instead of
        # looping through each bit, so the loop runs once per set bit rather
        # than 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
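
# Worked example of the Kernighan step (each iteration clears the lowest set bit):
#   13 = 0b1101 -> 12 = 0b1100 -> 8 = 0b1000 -> 0, i.e. three iterations,
#   so get_1s_count(13) == 3, and likewise get_1s_count(25) == 3 (25 = 0b11001).
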
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
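
# Sanity check of the metric logic against scipy directly (a sketch using the
# values from the docstring examples above):
#
#   from scipy.stats import spearmanr
#   rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
#   round(rho, 1)     # -0.7
#   round(pvalue, 2)  # 0.19
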
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    For every vector in value_array, find the nearest vector in dataset by
    Euclidean distance and return [nearest_vector, distance] pairs.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
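
# Example usage (a sketch; the dataset rows and the query must share dtype and width):
#
#   dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
#   value_array = np.array([[0, 0, 1]])
#   similarity_search(dataset, value_array)  # [[[0, 0, 0], 1.0]]
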
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
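
# A usage sketch of the src_lang/tgt_lang mechanics above (requires downloading
# the pretrained tokenizer; illustrative, not verified here):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   enc = tokenizer("Hello world")
#   # In the non-legacy behaviour, enc["input_ids"] starts with the eng_Latn
#   # language-code token and ends with </s>, per set_src_lang_special_tokens.
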
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    """Duplicate a scalar into a pair; pass iterables through unchanged."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through the given points at x0
    using Neville's iterated interpolation. Returns the interpolated value
    and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
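
# Example: the points (1, 6), (2, 7), (3, 8), (4, 9), (6, 11) all lie on
# y = x + 5, so interpolating at 5 recovers 10:
#   neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # 10.0
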
def __get_demo_graph(index):
    """Return one of four small undirected demo graphs as an adjacency dict."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph) -> list[tuple[int, int]]:
    """Return all bridges of an undirected graph as (smaller, larger) vertex pairs."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                # This way we arrive at the next set low-link instead of looping
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
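
# Example (a sketch): in the first demo graph above, removing edge (2, 3),
# (3, 4) or (2, 5) disconnects the graph, so
#   compute_bridges(__get_demo_graph(0))  # [(3, 4), (2, 3), (2, 5)], order may vary
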
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
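
# Import-time behaviour sketch: with the lazy structure above, importing the
# package stays cheap, and the heavy backend modules are only pulled in when
# one of their symbols is first accessed (assuming the backend is installed):
#
#   from transformers.models.wav2vec2 import Wav2Vec2Config  # light, config only
#   from transformers.models.wav2vec2 import Wav2Vec2Model   # triggers the torch import
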
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Shorten an image to a 10-character MD5 digest of its raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
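        # Sanity checks: origin and the x/y/z axes must share one batch dimension,
        # and each must be a [batch_size, 3] tensor.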
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
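        # Enumerate all pixels in row-major order, then split each flat index into
        # (column, row) coordinates: x = index % width, y = index // width.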
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
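        # Map integer pixel coordinates onto [-1, 1] normalized device coordinates,
        # then scale by tan(fov / 2) to get offsets on the camera's image plane.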
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
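        # Each ray direction is the view axis z plus per-pixel offsets along the
        # camera's x and y axes; normalize so every direction has unit length.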
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
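    # Sample 20 azimuth angles around a circle; each view direction z is tilted
    # downward (constant -0.5 vertical component) and the origin sits 4 units back
    # along -z, so all cameras orbit the scene while looking at its center.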
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(snake_case__ ), np.cos(snake_case__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(snake_case__ ), -np.sin(snake_case__ ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(snake_case__ ,snake_case__ )
origins.append(snake_case__ )
xs.append(snake_case__ )
ys.append(snake_case__ )
zs.append(snake_case__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,width=snake_case__ ,height=snake_case__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(snake_case__ )) ,)
| 306
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Tuple , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any]=7 , UpperCAmelCase_: int=3 , UpperCAmelCase_: List[str]=30 , UpperCAmelCase_: Union[str, Any]=400 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase_: Any=[0.5, 0.5, 0.5] , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]=1 / 255 , UpperCAmelCase_: int=True , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333}
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_pad
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any=False ):
'''simple docstring'''
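        # Mirror the processor's resize logic: scale the shorter side to
        # size["shortest_edge"] while preserving aspect ratio; for batched inputs,
        # return the per-dimension maxima, since images are padded up to them.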
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
_SCREAMING_SNAKE_CASE = self.image_processing_class(do_resize=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_rescale=UpperCAmelCase_ )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        # Test that the "pad" method and calling the image processor directly return the same tensors
_SCREAMING_SNAKE_CASE = image_processing_a.pad(UpperCAmelCase_ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = image_processing_a(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""image_id""": 39_769, """annotations""": target}
# encode them
_SCREAMING_SNAKE_CASE = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
_SCREAMING_SNAKE_CASE = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
_SCREAMING_SNAKE_CASE = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_SCREAMING_SNAKE_CASE = YolosImageProcessor(format="""coco_panoptic""" )
_SCREAMING_SNAKE_CASE = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify masks
_SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase_ )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
| 306
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
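        # Encode each conversation turn followed by EOS, then keep only the most
        # recent model_max_length tokens so the prompt fits the model's context window.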
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 306
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __UpperCAmelCase :
def __init__( self: List[str] , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: int , UpperCAmelCase_: List[str] ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
def UpperCamelCase ( self: Any ):
'''simple docstring'''
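        # Append the mandatory 0x80 byte and zero-pad until the length is congruent
        # to 56 mod 64, then append the original bit length as a big-endian 64-bit field.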
_SCREAMING_SNAKE_CASE = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_SCREAMING_SNAKE_CASE = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" , UpperCAmelCase_ ) ) + [0] * 64
for i in range(16 , 80 ):
_SCREAMING_SNAKE_CASE = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.padding()
_SCREAMING_SNAKE_CASE = self.split_blocks()
for block in self.blocks:
_SCREAMING_SNAKE_CASE = self.expand_block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.h
for i in range(0 , 80 ):
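                # The 80 rounds select the mixing function f and constant k by stage:
                # Ch (rounds 0-19), Parity (20-39), Maj (40-59), Parity again (60-79).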
if 0 <= i < 20:
_SCREAMING_SNAKE_CASE = (b & c) | ((~b) & d)
_SCREAMING_SNAKE_CASE = 0x5a_827_999
elif 20 <= i < 40:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0x6e_d9e_ba1
elif 40 <= i < 60:
_SCREAMING_SNAKE_CASE = (b & c) | (b & d) | (c & d)
_SCREAMING_SNAKE_CASE = 0x8f_1bb_cdc
elif 60 <= i < 80:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0xca_62c_1d6
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
self.rotate(UpperCAmelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(UpperCAmelCase_ , 30 ),
c,
d,
)
_SCREAMING_SNAKE_CASE = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = b"""Test String"""
assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
else:
_SCREAMING_SNAKE_CASE = bytes(snake_case__ ,"""utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
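# Lazy-import module: the submodules declared in the import structure below are only
# loaded on first attribute access, and optional vision/torch backends are skipped
# when they are not installed.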
UpperCamelCase = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''MobileNetV2FeatureExtractor''']
UpperCamelCase = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 306
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCamelCase = TypeVar('''T''')
class __UpperCAmelCase (Generic[T] ):
def __init__( self: Optional[int] , UpperCAmelCase_: T ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = None
def __str__( self: Optional[int] ):
'''simple docstring'''
return F'{self.data}'
class __UpperCAmelCase (Generic[T] ):
def __init__( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
def __iter__( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.top
while node:
yield node.data
_SCREAMING_SNAKE_CASE = node.next
def __str__( self: Union[str, Any] ):
'''simple docstring'''
return "->".join([str(UpperCAmelCase_ ) for item in self] )
def __len__( self: Any ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return self.top is None
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: T ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Node(UpperCAmelCase_ )
if not self.is_empty():
_SCREAMING_SNAKE_CASE = self.top
_SCREAMING_SNAKE_CASE = node
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.top
_SCREAMING_SNAKE_CASE = self.top.next
return pop_node.data
def UpperCamelCase ( self: str ):
'''simple docstring'''
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
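# A minimal usage sketch (hypothetical: the generic stack class above is name-obfuscated,
# so `Stack` stands in for whatever name it is actually bound to):
#     stack: Stack[int] = Stack()
#     stack.push(1)
#     stack.push(2)
#     assert stack.pop() == 2 and stack.peek() == 1 and len(stack) == 1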
if __name__ == "__main__":
from doctest import testmod
testmod()
| 306
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 306
| 1
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase = logging.get_logger(__name__)
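# normalize_box scales an absolute pixel box onto the 0-1000 grid, relative to the
# image's width and height, which is the bounding-box format LayoutLM-style models expect.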
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[str]:
"""simple docstring"""
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ = None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else """"""
# apply OCR
_SCREAMING_SNAKE_CASE = to_pil_image(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pil_image.size
_SCREAMING_SNAKE_CASE = pytesseract.image_to_data(snake_case__ ,lang=snake_case__ ,output_type="""dict""" ,config=snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(snake_case__ ) if not word.strip()]
_SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(snake_case__ ) if idx not in irrelevant_indices]
_SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices]
_SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices]
_SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices]
_SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(snake_case__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(snake_case__ )
# finally, normalize the bounding boxes
_SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(snake_case__ ,snake_case__ ,snake_case__ ) )
assert len(snake_case__ ) == len(snake_case__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = ["pixel_values"]
def __init__( self: List[str] , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: Optional[str] = "" , **UpperCAmelCase_: str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = apply_ocr
_SCREAMING_SNAKE_CASE = ocr_lang
_SCREAMING_SNAKE_CASE = tesseract_config
def UpperCamelCase ( self: Dict , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Tuple , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
_SCREAMING_SNAKE_CASE = (size["""height"""], size["""width"""])
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: int , UpperCAmelCase_: ImageInput , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , UpperCAmelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
_SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
_SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
_SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for image in images:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = apply_tesseract(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
words_batch.append(UpperCAmelCase_ )
boxes_batch.append(UpperCAmelCase_ )
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
        # flip color channels from RGB to BGR (as required by Detectron2)
_SCREAMING_SNAKE_CASE = [flip_channel_order(UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = BatchFeature(data={"""pixel_values""": images} , tensor_type=UpperCAmelCase_ )
if apply_ocr:
_SCREAMING_SNAKE_CASE = words_batch
_SCREAMING_SNAKE_CASE = boxes_batch
return data
| 306
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
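    # create_tensor gives rank p the values [p*n + 1, ..., p*n + n] for world size n,
    # so the gathered result must be exactly list(range(1, n**2 + 1)).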
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
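# Added note: with the _LazyModule swap above, importing this package is cheap —
# the tokenizer submodule is only imported the first time the attribute
# Wav2Vec2PhonemeCTCTokenizer is actually accessed on the module object.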
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random array of ten values and a random target sum."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)

dataset = make_dataset()

def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of the array (O(n^3))."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)

def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then run a two-pointer scan for each fixed first element (O(n^2))."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
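def _cross_check(samples: int = 20) -> None:
    # Added sanity check (illustrative, not in the original script): both solvers
    # must agree on *whether* a triplet exists, and any non-sentinel result must
    # actually hit the target. Copies are passed because triplet_sum2 sorts in place.
    for _ in range(samples):
        arr, target = make_dataset()
        res1, res2 = triplet_sum1(arr[:], target), triplet_sum2(arr[:], target)
        assert (res1 == (0, 0, 0)) == (res2 == (0, 0, 0))
        for res in (res1, res2):
            if res != (0, 0, 0):
                assert sum(res) == target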
def solution_times() -> tuple[float, float]:
    """Time both solvers on the shared random dataset and return the best of 5 runs each."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using a 6k +/- 1 wheel."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
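# Added note: after ruling out multiples of 2 and 3 above, every remaining prime
# candidate has the form 6k - 1 or 6k + 1, so the loop only has to test `i` and
# `i + 2` per step of 6 — roughly a third of the divisions of naive trial division
# up to sqrt(number).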
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]

def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # The `while` loop ended without `break`: no decomposition was found.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []

def solution() -> int:
    """Project Euler 46: the smallest odd composite refuting Goldbach's other conjecture."""
    return compute_nums(1)[0]

if __name__ == "__main__":
    print(f"{solution() = }")
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()

class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
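# --- Usage sketch (added; illustrative) -------------------------------------------
# Outside the test suite, the same pipeline generates an image like so (weights are
# downloaded on first use):
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to(torch_device)
#     image = pipe(num_inference_steps=50).images[0]  # PIL image by default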
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the old-structure ProphetNet weights into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old attention stored q/k/v as one fused in_proj; split it in thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
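# Example invocation (added; script name and paths are illustrative placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted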
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __lowerCamelCase ( snake_case__ ,snake_case__ = 16 ,snake_case__ = "bert-base-cased" ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=snake_case__ ,max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_SCREAMING_SNAKE_CASE = datasets.map(
snake_case__ ,batched=snake_case__ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ ,padding="""max_length""" ,max_length=1_28 ,return_tensors="""pt""" )
return tokenizer.pad(snake_case__ ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""train"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own optimizer.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)

if __name__ == "__main__":
    main()
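# Example invocation (added; illustrative — assumes an `accelerate config` that
# enables DeepSpeed, and placeholder paths/filename):
#
#   accelerate launch --use_deepspeed performance.py \
#       --model_name_or_path bert-base-cased --output_dir ./results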
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Return indices i < j with nums[i] + nums[j] == target, assuming `nums` is sorted.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 17)
    [0, 3]
    >>> two_pointer([2, 7, 11, 15], 100)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
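# Added note: the scan above relies on `nums` being sorted in non-decreasing order;
# for unsorted input, either sort first (O(n log n) overall) or use a hash map of
# seen values for a one-pass O(n) solution.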
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
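# Added usage note: strtobool accepts the usual vocabulary, e.g.
#   RUN_SLOW=yes python -m pytest tests/   -> slow tests run
#   RUN_SLOW=0   python -m pytest tests/   -> slow tests skipped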
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)

def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)

def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)

def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)

def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)

def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)

def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)

def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)

def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)

def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)

def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)

def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)

def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)

def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)

def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)

def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)

def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)

def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)

def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)

def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)

def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Keeps one temporary directory alive for the class, optionally wiped between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a temporary directory for the class."
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the tests are finished."
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself."
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)

class AccelerateTestCase(unittest.TestCase):
    """Resets the accelerator state between tests so class-wide singletons do not leak."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()

class MockingTestCase(unittest.TestCase):
    """Dynamically adds mocks to be active for every test, stopping them via `addCleanup`."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that `tensor` holds the same value on every process."""
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass

def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns its stdout,
    properly capturing any error that occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF checkpoint variable names to the corresponding PyTorch parameters."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch MobileNetV1 model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad `features` the way TensorFlow "SAME" padding would before the convolution."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            # Each block is a depthwise 3x3 convolution ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
def merge_sort(collection: list) -> list:
    """
    Pure-Python merge sort, returning a new sorted list.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two already-sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
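# Added note: this variant is stable and O(n log n) in comparisons, but pop(0) on a
# Python list is O(k) per call; merging with explicit indices avoids that overhead.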
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
def UpperCamelCase ( self: int ):
'''simple docstring'''
with pytest.raises(UpperCAmelCase_ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def UpperCamelCase ( self: int ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(UpperCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCAmelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCAmelCase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
_SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
    def test_tokenizer_class_from_name(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
@require_tokenizers
def UpperCamelCase ( self: Any ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCAmelCase_ )
@require_tokenizers
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """Hello, world. How are you?"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CustomTokenizer.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase ( self: Any ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCAmelCase_ )
# Can register in two steps
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoTokenizer.register(UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained(UpperCAmelCase_ )
bert_tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CustomTokenizerFast.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with self.assertRaises(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = False
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[int] = NewTokenizer
__snake_case : Optional[Any] = False
try:
AutoConfig.register("""custom""" , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , fast_tokenizer_class=UpperCAmelCase_ )
# If remote code is not set, the default is to use local
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(UpperCAmelCase_ , revision="""aaaaaa""" )
    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have a cached copy of the tokenizer: the second load must not hit the network.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) every sequence to `sequence_length` with `padding_value`."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
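# Worked example: padding_tensor([[1, 2]], -1, "right", 4) -> [[1, 2, -1, -1]];
# with a tuple padding_value such as (-1, -1), every position holds a pair instead.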
def is_punctuation(char: str) -> bool:
    """Check whether `char` is punctuation (ASCII ranges or Unicode category P*)."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
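# e.g. is_punctuation("!") and is_punctuation("¿") are True, is_punctuation("a") is False.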
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
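# Note: labels, ner_tags and original_entity_spans are padded by hand here (to the
# entity sequence length) rather than by tokenizer.pad, which only knows token lengths.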
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # Map every destination pixel back to its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to the corresponding source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to the corresponding source row."""
        return int(self.ratio_y * y)
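# Nearest-neighbour interpolation simply copies the closest source pixel for each
# output pixel: it keeps hard edges but looks blocky when upscaling by large factors.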
if __name__ == "__main__":
UpperCamelCase , UpperCamelCase = 800, 600
UpperCamelCase = imread('''image_data/lena.jpg''', 1)
UpperCamelCase = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy a trax weight (and optional bias) into a torch layer, checking shapes."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
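# Most of the converters below funnel through set_param, so a shape mismatch anywhere
# in the checkpoint fails fast with the offending torch layer named in the assertion.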
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # LSH self-attention has a shared query_key projection plus value and output.
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # Local self-attention has separate query, key, value and output projections.
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = ["pixel_values"]
def __init__( self: List[str] , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: Union[int, float] = 1 / 255 , UpperCAmelCase_: bool = True , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 256, """width""": 256}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = crop_size
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self: Any , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Optional[int] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: List[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Union[int, float] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: int , ):
'''simple docstring'''
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Any , ):
'''simple docstring'''
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: ImageInput , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: str=None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: float = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , UpperCAmelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_: str , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = TextToVideoSDPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
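# e.g. combinations(5, 2) == 120 // (2 * 6) == 10, and combinations(52, 5) == 2598960.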
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int=13 , UpperCAmelCase_: Optional[int]=7 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=33 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: List[Any]=5 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Optional[Any]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens must keep padding_idx; other positions start at padding_idx + 1."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids built from inputs_embeds must also start right after padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place (and return it) using patience sort."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
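# e.g. patience_sort([1, 9, 5, 21, 17, 6]) -> [1, 5, 6, 9, 17, 21]; each stack holds a
# non-increasing run, and the stack count equals the longest increasing subsequence.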
if __name__ == "__main__":
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
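# Five random witnesses bound the false-positive rate for a composite at 4**-5
# (under 0.1%); is_prime_low_num below screens with small primes before falling back here.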
def is_prime_low_num(num: int) -> bool:
    """Quick primality check: trial division by small primes, then Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
        if is_prime_low_num(num ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
    print('''Prime number:''', num)
    print('''is_prime_low_num:''', is_prime_low_num(num))
| 306
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''vocab.txt'''}
UpperCamelCase = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
UpperCamelCase = {
'''openbmb/cpm-ant-10b''': 1_024,
}
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(snake_case__ ,"""r""" ,encoding="""utf-8""" ) as reader:
_SCREAMING_SNAKE_CASE = reader.readlines()
    for index, token in enumerate(tokens ):
        _SCREAMING_SNAKE_CASE = token.rstrip("""\n""" )
        vocab[token] = index
return vocab
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Tuple , UpperCAmelCase_: Any , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: int=200 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = vocab
_SCREAMING_SNAKE_CASE = unk_token
_SCREAMING_SNAKE_CASE = max_input_chars_per_word
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = []
while start < len(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = None
while start < end:
_SCREAMING_SNAKE_CASE = """""".join(chars[start:end] )
if substr in self.vocab:
_SCREAMING_SNAKE_CASE = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
            sub_tokens.append(cur_substr )
_SCREAMING_SNAKE_CASE = end
return sub_tokens
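# Hedged, self-contained sketch (added; `_greedy_longest_match_demo` and its
# tiny vocabulary are invented for illustration): the same greedy
# longest-prefix scan that the tokenize method above performs.
def _greedy_longest_match_demo(word: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1  # shrink the candidate substring until it is in vocab
        if end == start:  # nothing matched: emit unk and advance one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens
# _greedy_longest_match_demo("abcd", {"ab", "abc", "d"}) -> ["abc", "d"]
# _greedy_longest_match_demo("xabc", {"ab", "abc", "d"}) -> ["<unk>", "abc"]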
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = VOCAB_FILES_NAMES
__snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : Any = ["input_ids", "attention_mask"]
__snake_case : str = False
def __init__( self: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[Any]="<d>" , UpperCAmelCase_: Optional[int]="</d>" , UpperCAmelCase_: List[Any]="<s>" , UpperCAmelCase_: Dict="</s>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: List[Any]="<unk>" , UpperCAmelCase_: int="</n>" , UpperCAmelCase_: str="</_>" , UpperCAmelCase_: int="left" , **UpperCAmelCase_: Optional[int] , ):
'''simple docstring'''
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=UpperCAmelCase_ , eod_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , line_token=UpperCAmelCase_ , space_token=UpperCAmelCase_ , padding_side=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = bod_token
_SCREAMING_SNAKE_CASE = eod_token
_SCREAMING_SNAKE_CASE = load_vocab(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.encoder[space_token]
_SCREAMING_SNAKE_CASE = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        _SCREAMING_SNAKE_CASE = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return self.encoder["\n"]
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
        for x in jieba.cut(UpperCAmelCase_ , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
return output_tokens
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: int , **UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [i for i in token_ids if i >= 0]
_SCREAMING_SNAKE_CASE = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Any ):
'''simple docstring'''
return token in self.encoder
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[str] ):
'''simple docstring'''
return "".join(UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self: int , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if os.path.isdir(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_SCREAMING_SNAKE_CASE = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_SCREAMING_SNAKE_CASE = 0
if " " in self.encoder:
_SCREAMING_SNAKE_CASE = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_SCREAMING_SNAKE_CASE = self.encoder["""\n"""]
del self.encoder["\n"]
        _SCREAMING_SNAKE_CASE = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
_SCREAMING_SNAKE_CASE = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: List[int] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ ))
return [1] + ([0] * len(UpperCAmelCase_ ))
| 306
|
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
    if not isinstance(number , int ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_SCREAMING_SNAKE_CASE = 0
while number:
        # Clearing the lowest set bit jumps straight to the next 1, so the loop
        # runs once per set bit instead of once per bit position.
number &= number - 1
count += 1
return count
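# Hedged trace helper (added for illustration; not in the original file):
# records each intermediate value of Kernighan's `n &= n - 1` step, making the
# one-set-bit-cleared-per-iteration behaviour visible.
def _popcount_trace(n: int) -> list:
    trace = []
    while n:
        n &= n - 1
        trace.append(n)
    return trace
# _popcount_trace(0b1011) -> [10, 8, 0]; its length, 3, is the bit count of 11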
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''spm_char.model'''}
UpperCamelCase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
UpperCamelCase = {
'''microsoft/speecht5_asr''': 1_024,
'''microsoft/speecht5_tts''': 1_024,
'''microsoft/speecht5_vc''': 1_024,
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Union[str, Any] = VOCAB_FILES_NAMES
__snake_case : Dict = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : Tuple = ["input_ids", "attention_mask"]
def __init__( self: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Dict="</s>" , UpperCAmelCase_: str="<unk>" , UpperCAmelCase_: Optional[Any]="<pad>" , UpperCAmelCase_: Optional[Dict[str, Any]] = None , **UpperCAmelCase_: Tuple , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
        _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self: Union[str, Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
return self.sp_model.piece_to_id(UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(UpperCAmelCase_ )
return token
def UpperCamelCase ( self: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                _SCREAMING_SNAKE_CASE = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
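    # Hedged illustration (added): the buffer above is flushed whenever a
    # special token appears, so for tokens like ["▁he", "llo", "</s>", "▁hi"]
    # the pieces around "</s>" are decoded separately and the marker itself is
    # appended verbatim rather than being run through sp_model.decode.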
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[str]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self: int , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [1]
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + suffix_ones
return ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 1
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class __UpperCAmelCase (unittest.TestCase ):
@classmethod
def UpperCamelCase ( cls: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(UpperCAmelCase_ )
@classmethod
def UpperCamelCase ( cls: int ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ , repo_id="""test-config""" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase_ , repo_id="""valid_org/test-config-org""" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
def UpperCamelCase ( self: str ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
_SCREAMING_SNAKE_CASE = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_SCREAMING_SNAKE_CASE = c.n_embd + 1 # int
_SCREAMING_SNAKE_CASE = c.resid_pdrop + 1.0 # float
_SCREAMING_SNAKE_CASE = not c.scale_attn_weights # bool
_SCREAMING_SNAKE_CASE = c.summary_type + """foo""" # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(UpperCAmelCase_ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(UpperCAmelCase_ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(UpperCAmelCase_ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(UpperCAmelCase_ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = PretrainedConfig()
_SCREAMING_SNAKE_CASE = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase_ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        _SCREAMING_SNAKE_CASE = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
if len(UpperCAmelCase_ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F' {", ".join(UpperCAmelCase_ )}.' )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with self.assertRaises(UpperCAmelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = mock.Mock()
_SCREAMING_SNAKE_CASE = 500
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = HTTPError
_SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=UpperCAmelCase_ ) as mock_head:
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase_ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_SCREAMING_SNAKE_CASE = ["""config.42.0.0.json"""]
_SCREAMING_SNAKE_CASE = 768
configuration.save_pretrained(UpperCAmelCase_ )
shutil.move(os.path.join(UpperCAmelCase_ , """config.4.0.0.json""" ) , os.path.join(UpperCAmelCase_ , """config.42.0.0.json""" ) )
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
_SCREAMING_SNAKE_CASE = """v4.0.0"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase_ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_SCREAMING_SNAKE_CASE = """v3.0.0"""
_SCREAMING_SNAKE_CASE = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 306
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
        _SCREAMING_SNAKE_CASE = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
        _SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=True , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
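    # Hedged illustration (added; ids are invented): with legacy_behaviour=False
    # the source language code leads and </s> trails, so ids [100, 101] are
    # templated as [lang_code_id, 100, 101, eos_id]; with legacy_behaviour=True
    # both specials trail instead: [100, 101, eos_id, lang_code_id].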
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
| 306
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: str , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Dict=True , UpperCAmelCase_: int=False , UpperCAmelCase_: List[Any]="<s>" , UpperCAmelCase_: int="</s>" , UpperCAmelCase_: str="<unk>" , UpperCAmelCase_: List[Any]="<sep>" , UpperCAmelCase_: Optional[Any]="<pad>" , UpperCAmelCase_: Optional[Any]="<cls>" , UpperCAmelCase_: Any="<mask>" , UpperCAmelCase_: List[Any]=["<eop>", "<eod>"] , UpperCAmelCase_: Optional[Dict[str, Any]] = None , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
        _SCREAMING_SNAKE_CASE = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = do_lower_case
_SCREAMING_SNAKE_CASE = remove_space
_SCREAMING_SNAKE_CASE = keep_accents
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_SCREAMING_SNAKE_CASE = jieba
_SCREAMING_SNAKE_CASE = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
        _SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self: List[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
if self.remove_space:
_SCREAMING_SNAKE_CASE = """ """.join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE = inputs
_SCREAMING_SNAKE_CASE = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.preprocess_text(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = []
for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                _SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
return new_pieces
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """""".join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , """ """ ).strip()
return out_string
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1]
return ([0] * len(UpperCAmelCase_ )) + [1, 1]
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def UpperCamelCase ( self: int , *UpperCAmelCase_: str , **UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = super()._decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
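# Hedged round-trip sketch (added; `_cpm_whitespace_roundtrip` is invented for
# illustration): spaces and newlines are mapped to the placeholder glyphs
# U+2582/U+2583 before sentencepiece sees the text (the translator built in
# __init__ above), and _decode maps them back.
def _cpm_whitespace_roundtrip(text: str) -> str:
    encoded = text.translate(str.maketrans(" \n", "\u2582\u2583"))
    return encoded.replace("\u2582", " ").replace("\u2583", "\n")
# _cpm_whitespace_roundtrip("a b\nc") == "a b\nc"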
| 306
|
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
    _SCREAMING_SNAKE_CASE = len(x_points )
    _SCREAMING_SNAKE_CASE = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
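# Hedged, self-contained check (added; `_neville_demo` is invented for
# illustration): a zero-indexed variant of the recurrence above, applied to the
# points (1, 1), (2, 4), (3, 9) of y = x**2 at x0 = 1.5; the quadratic through
# the samples reproduces x**2 exactly, so the result is 2.25.
def _neville_demo() -> float:
    x_points, y_points, x0 = [1, 2, 3], [1, 4, 9], 1.5
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = float(y_points[i])
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]
# _neville_demo() -> 2.25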
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''PoolFormerFeatureExtractor''']
UpperCamelCase = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 306
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 1
|
def __lowerCamelCase ( snake_case__ ) -> list[int]:
"""simple docstring"""
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
_SCREAMING_SNAKE_CASE = [True] * (num + 1)
_SCREAMING_SNAKE_CASE = 2
while p * p <= num:
if primes[p]:
            for i in range(p * p ,num + 1 ,p ):
                primes[i] = False
p += 1
return [prime for prime in range(2 ,num + 1 ) if primes[prime]]
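# Hedged worked example (added): for num = 10 the loop strikes out 4, 6, 8 and
# 10 while p = 2, and 9 while p = 3; the scan stops once p * p exceeds 10, so
# prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].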
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 306
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
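    # Hedged illustration (added): the arange above enumerates pixels row-major,
    # so index i maps to (column, row) = (i % width, i // width); for width=3,
    # height=2 the stacked coords are (0,0), (1,0), (2,0), (0,1), (1,1), (2,1).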
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
        _SCREAMING_SNAKE_CASE = int(np.prod(inner_shape ) )
        _SCREAMING_SNAKE_CASE = self.get_image_coords()
        _SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        _SCREAMING_SNAKE_CASE = self.get_camera_rays(coords )
        _SCREAMING_SNAKE_CASE = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
        _SCREAMING_SNAKE_CASE = coords.view(batch_size , -1 , 2 )
        _SCREAMING_SNAKE_CASE = self.resolution()
        _SCREAMING_SNAKE_CASE = self.fov()
        _SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
        _SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
        _SCREAMING_SNAKE_CASE = fracs.view(batch_size , -1 , 2 )
        _SCREAMING_SNAKE_CASE = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        _SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=True )
        _SCREAMING_SNAKE_CASE = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
        _SCREAMING_SNAKE_CASE = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        _SCREAMING_SNAKE_CASE = -z * 4
        _SCREAMING_SNAKE_CASE = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        _SCREAMING_SNAKE_CASE = np.cross(z ,x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(xs ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(ys ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(zs ,axis=0 ) ).float() ,width=size ,height=size ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(xs )) ,)
| 306
| 1
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> list:
    """simple docstring"""
    if isinstance(padding_value ,tuple ):
        _SCREAMING_SNAKE_CASE = np.full((len(sequences ), sequence_length, 2) ,padding_value )
    else:
        _SCREAMING_SNAKE_CASE = np.full((len(sequences ), sequence_length) ,padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value ,tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value ,tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
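# Hedged usage sketch (added; values are invented): right-padding [[1, 2], [3]]
# to sequence_length 3 with padding_value -100 yields
# [[1, 2, -100], [3, -100, -100]]; the left branch instead offsets each row
# toward the end of the buffer.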
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ )
if cat.startswith("""P""" ):
return True
return False
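# Hedged examples (added): "," and "[" fall in the ASCII ranges checked above;
# "。" (ideographic full stop) is caught by the Unicode "P" category branch;
# "a" and "5" return False.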
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : PreTrainedTokenizerBase
__snake_case : Union[bool, str, PaddingStrategy] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : int = -100
__snake_case : str = "pt"
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
import torch
_SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels"""
_SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1]
_SCREAMING_SNAKE_CASE = self.tokenizer.padding_side
if padding_side == "right":
            _SCREAMING_SNAKE_CASE = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            _SCREAMING_SNAKE_CASE = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
_SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features]
        _SCREAMING_SNAKE_CASE = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        _SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features]
        _SCREAMING_SNAKE_CASE = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        _SCREAMING_SNAKE_CASE = {k: torch.tensor(v , dtype=torch.intaa ) for k, v in batch.items()}
return batch
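# Hypothetical usage sketch (assumes feature dicts shaped like the LUKE NER
# preprocessing output; the model id is an example, not from this file):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(list_of_feature_dicts)  # pads labels, ner_tags, original_entity_spans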
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
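# These tests follow the standard transformers test layout; assuming the usual repo
# paths (an assumption -- this dump does not record the original file name), they
# could be run with something like:
#   python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py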
from manim import *
class __UpperCAmelCase (_UpperCAmelCase ):
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""CPU""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(1 )]
_SCREAMING_SNAKE_CASE = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""GPU""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.align_to(UpperCAmelCase_ , UpperCAmelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""Model""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase_ , run_time=1 ) , Create(UpperCAmelCase_ , run_time=1 ) , Create(UpperCAmelCase_ , run_time=1 ) , )
_SCREAMING_SNAKE_CASE = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=2.5 ) , Write(UpperCAmelCase_ ) , Write(UpperCAmelCase_ ) )
self.add(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase_ )
cpu_target.generate_target()
_SCREAMING_SNAKE_CASE = 0.46 / 4
_SCREAMING_SNAKE_CASE = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase_ , buff=0.0 )
cpu_targs.append(UpperCAmelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase_ ) )
second_animations.append(MoveToTarget(UpperCAmelCase_ , run_time=1.5 ) )
self.play(*UpperCAmelCase_ )
self.play(*UpperCAmelCase_ )
self.wait()
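# To render the scene above with a standard Manim Community install, one would run
# something like (file and scene names are stand-ins, since this dump obfuscates the
# scene class identifier):
#   manim -pql this_file.py <SceneClassName>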
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    def __init__(self, data: bytes):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        """Pad the message to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        """Split the padded message into 64-byte blocks."""
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block: bytes) -> list:
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
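# Worked example: the SHA-1 digest of b"abc" is the well-known test vector
#   SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"
# which matches hashlib.sha1(b"abc").hexdigest().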
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
_SCREAMING_SNAKE_CASE = noise
_SCREAMING_SNAKE_CASE = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mel.audio_slice_to_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE = (input_image / 255) * 2 - 1
_SCREAMING_SNAKE_CASE = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0 ) ).latent_dist.sample(
generator=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["""sample"""]
else:
_SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase_ , UpperCAmelCase_ )["""sample"""]
if isinstance(self.scheduler , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["""prev_sample"""]
else:
_SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE = self.vqvae.decode(UpperCAmelCase_ )["""sample"""]
_SCREAMING_SNAKE_CASE = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE = (images * 255).round().astype("""uint8""" )
_SCREAMING_SNAKE_CASE = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
_SCREAMING_SNAKE_CASE = [self.mel.image_to_audio(UpperCAmelCase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase_ ) )
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising process: recover the noisy sample that generates `images`."""
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output
        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
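# Illustrative use of slerp above (random stand-in tensors, not from the original file):
#
#   x0, x1 = torch.randn(1, 256), torch.randn(1, 256)
#   halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)  # spherical midpoint of the two noises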
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
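# Minimal usage sketch (hypothetical; "bigscience/bloom-560m" is one of the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tokenizer("Hello world")["input_ids"]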
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid's algorithm: returns (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Find n such that n % n1 == r1 and n % n2 == r2 (n1 and n2 must be coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem above, but solved via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
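# Worked example: find n with n % 5 == 1 and n % 7 == 3. extended_euclid(5, 7) gives
# (3, -2) since 5*3 + 7*(-2) = 1, so n = 3*3*5 + 1*(-2)*7 = 31, and indeed
# chinese_remainder_theorem(5, 1, 7, 3) == chinese_remainder_theorem2(5, 1, 7, 3) == 31.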
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
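# Minimal usage sketch (illustrative values; any keyword not passed keeps the
# defaults defined above):
#
#   config = NllbMoeConfig(num_experts=8, expert_capacity=32, router_dtype="float32")
#   assert config.d_model == 1024 and config.num_experts == 8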
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure padding actually happens somewhere
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
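# These checks are meant to run on several processes at once, e.g. (assuming this file
# is saved locally as test_ops.py; the file name is a stand-in):
#   accelerate launch --num_processes 2 test_ops.py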
def check_bouncy(n: int) -> bool:
    """Return True if `n` is a bouncy number (digits neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive solution: try every 3-permutation of `arr` until one sums to `target`."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized solution: sort once, then use the two-pointer scan."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
def solution(max_perimeter: int = 10**9) -> int:
    """Return the accumulated sum of the generated perimeters not exceeding `max_perimeter`."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Assign every pixel in `x` to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self: Tuple , UpperCAmelCase_: Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_: bool = True , UpperCAmelCase_: bool = True , **UpperCAmelCase_: str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 256, """width""": 256}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array(UpperCAmelCase_ ) if clusters is not None else None
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = do_color_quantize
def UpperCamelCase ( self: int , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: str , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = rescale(image=UpperCAmelCase_ , scale=1 / 1_27.5 , data_format=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image - 1
return image
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: ImageInput , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_SCREAMING_SNAKE_CASE = clusters if clusters is not None else self.clusters
_SCREAMING_SNAKE_CASE = np.array(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=UpperCAmelCase_ ) for image in images]
if do_color_quantize:
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_SCREAMING_SNAKE_CASE = np.array(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = color_quantize(UpperCAmelCase_ , UpperCAmelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_SCREAMING_SNAKE_CASE = images.shape[0]
_SCREAMING_SNAKE_CASE = images.reshape(UpperCAmelCase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_SCREAMING_SNAKE_CASE = list(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = {"""input_ids""": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
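# Usage sketch (added for illustration; the palette and image below are
# hypothetical, not part of the original file): the same argmin-over-squared-
# distances mapping that `color_quantize` applies, written out once by hand.
def _color_quantize_demo():
    palette = np.random.rand(16, 3)                  # 16 RGB cluster centers
    pixels = np.random.rand(8, 8, 3).reshape(-1, 3)  # flatten image to (n_pixels, 3)
    distances = ((pixels[:, None, :] - palette[None, :, :]) ** 2).sum(-1)
    return np.argmin(distances, axis=1)              # one palette index per pixel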
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
else:
_SCREAMING_SNAKE_CASE = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
_SCREAMING_SNAKE_CASE = ["""key_proj""", """value_proj""", """query_proj"""]
_SCREAMING_SNAKE_CASE = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE = prophet
_SCREAMING_SNAKE_CASE = prophet_old
else:
_SCREAMING_SNAKE_CASE = prophet.prophetnet
_SCREAMING_SNAKE_CASE = prophet_old.model
_SCREAMING_SNAKE_CASE = False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE = mapping[attribute]
if not hasattr(snake_case__ ,snake_case__ ) and len(snake_case__ ) > 0:
_SCREAMING_SNAKE_CASE = attribute
elif hasattr(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.weight
logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.bias
                logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute in special_keys and hasattr(snake_case__ ,"""in_proj_weight""" ):
_SCREAMING_SNAKE_CASE = old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE = True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE = model[int(snake_case__ )]
_SCREAMING_SNAKE_CASE = old_model[int(snake_case__ )]
else:
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if old_attribute == "":
_SCREAMING_SNAKE_CASE = old_model
else:
if not hasattr(snake_case__ ,snake_case__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
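    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
    #       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
    #       --pytorch_dump_folder_path ./prophetnet_converted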
| 306
| 1
|
def interpolation_search(sorted_collection, item):
    """Searches the ascending `sorted_collection` for `item` by interpolating its probable position; returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over `sorted_collection[left:right + 1]`; returns the index of `item` or None."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raises ValueError if `collection` is not ascending sorted; returns True otherwise."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
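# Minimal sanity check (a sketch added for illustration): both variants should
# agree on a sorted, roughly uniformly distributed input, which is the
# assumption behind interpolation search's O(log log n) average case.
def _interpolation_search_demo():
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(data, 66) == 5
    assert interpolation_search_by_recursion(data, 66, 0, len(data) - 1) == 5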
if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 306
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Returns indices [i, j] with nums[i] + nums[j] == target in the ascending list `nums`, or [] if no pair exists."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
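# Worked example (a sketch added for illustration): the sorted-input invariant
# lets each comparison discard one endpoint; for target 9, 2 + 15 > 9 drops j,
# 2 + 11 > 9 drops j again, and 2 + 7 == 9 returns [0, 1].
def _two_pointer_demo():
    assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
    assert two_pointer([2, 7, 11, 15], 26) == [2, 3]
    assert two_pointer([2, 7, 11, 15], 8) == []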
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306
| 1
|
def sylvester(number: int) -> int:
    """Returns the `number`-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
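# Quick check (a sketch added for illustration): each term is the product of
# all previous terms plus one, which is what `lower * upper + 1` computes.
def _sylvester_demo():
    assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]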
if __name__ == "__main__":
print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 306
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''MobileNetV1Config'''
# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1_024, 7, 7]
# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = model.mobilenet_va
else:
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = """MobilenetV1/Conv2d_0/"""
_SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = i * 2
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_SCREAMING_SNAKE_CASE = model.classifier.weight
_SCREAMING_SNAKE_CASE = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_SCREAMING_SNAKE_CASE = tf.train.list_variables(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
_SCREAMING_SNAKE_CASE = tf.train.load_variable(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = array
# Build TF to PyTorch weights loading map
_SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(snake_case__ ,snake_case__ ,snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
_SCREAMING_SNAKE_CASE = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_SCREAMING_SNAKE_CASE = array.squeeze().transpose()
else:
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
_SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ ,snake_case__ )
tf_weights.pop(name + """/RMSProp""" ,snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" ,snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = features.shape[-2:]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.stride
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = conv_layer.kernel_size
if in_height % stride_height == 0:
_SCREAMING_SNAKE_CASE = max(kernel_height - stride_height ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
_SCREAMING_SNAKE_CASE = max(kernel_width - stride_width ,0 )
else:
_SCREAMING_SNAKE_CASE = max(kernel_width - (in_width % stride_width) ,0 )
_SCREAMING_SNAKE_CASE = pad_along_width // 2
_SCREAMING_SNAKE_CASE = pad_along_width - pad_left
_SCREAMING_SNAKE_CASE = pad_along_height // 2
_SCREAMING_SNAKE_CASE = pad_along_height - pad_top
_SCREAMING_SNAKE_CASE = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(snake_case__ ,snake_case__ ,"""constant""" ,0.0 )
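# Worked example (added for illustration): for a 7-pixel-wide input with
# stride 2 and kernel 3, in_width % stride_width == 1, so
# pad_along_width = max(3 - 1, 0) = 2, split as pad_left = 1 and
# pad_right = 1 -- the same result TensorFlow "SAME" padding produces.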
class __UpperCAmelCase (nn.Module ):
def __init__( self: Optional[Any] , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: Optional[int] = 1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[bool] = True , UpperCAmelCase_: Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
_SCREAMING_SNAKE_CASE = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_SCREAMING_SNAKE_CASE = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="""zeros""" , )
if use_normalization:
_SCREAMING_SNAKE_CASE = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
_SCREAMING_SNAKE_CASE = config.hidden_act
else:
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_SCREAMING_SNAKE_CASE = apply_tf_padding(UpperCAmelCase_ , self.convolution )
_SCREAMING_SNAKE_CASE = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
_SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
_SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase_ )
return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
| 306
| 1
|
def binary_multiply(a: int, b: int) -> int:
    """Multiplies `a` and `b` with the shift-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Computes (a * b) % c with shift-and-add, reducing every intermediate modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
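# Quick check (a sketch added for illustration): shift-and-add reproduces
# ordinary multiplication, and the modular variant agrees with (a * b) % c.
def _binary_multiply_demo():
    assert binary_multiply(13, 11) == 143
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7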
| 306
|
def merge_sort(collection: list) -> list:
    """Sorts `collection` ascending with a pure-Python top-down merge sort."""

    def merge(left: list, right: list) -> list:
        """Merges two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
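# Quick check (a sketch added for illustration): the generator-based merge
# pops from the front of each list, trading O(n) pops for brevity; the result
# is still a standard O(n log n) stable sort.
def _merge_sort_demo():
    assert merge_sort([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]
    assert merge_sort([]) == []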
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pads each sequence in `sequences` to `sequence_length` with `padding_value` on the given side."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
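# Quick check (a sketch added for illustration): right-padding [1, 2] to
# length 4 with -1 gives [1, 2, -1, -1]; tuple padding values pad pairs such
# as entity spans the same way.
def _padding_tensor_demo():
    assert padding_tensor([[1, 2]], -1, "right", 4) == [[1, 2, -1, -1]]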
def is_punctuation(char):
    """Returns True if `char` is an ASCII symbol or a Unicode punctuation character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
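# Quick check (a sketch added for illustration): ASCII symbols fall in the
# four code-point ranges above, and other scripts' punctuation is caught by
# the Unicode "P" category test.
def _is_punctuation_demo():
    assert is_punctuation("!") and is_punctuation("\u3002") and not is_punctuation("a")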
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : PreTrainedTokenizerBase
__snake_case : Union[bool, str, PaddingStrategy] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : int = -100
__snake_case : str = "pt"
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
import torch
_SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels"""
_SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1]
_SCREAMING_SNAKE_CASE = self.tokenizer.padding_side
if padding_side == "right":
_SCREAMING_SNAKE_CASE = [
list(UpperCAmelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) for label in labels
]
else:
_SCREAMING_SNAKE_CASE = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) + list(UpperCAmelCase_ ) for label in labels
]
_SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , -1 , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , (-1, -1) , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {k: torch.tensor(UpperCAmelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 306
| 1
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level `edges` graph; returns vertices in reverse postorder."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
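# Note (added for illustration): the traversal above appends each vertex after
# all of its descendants, so for the sample graph the printed result is
# ['c', 'd', 'e', 'b', 'a'].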
| 306
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copies `weight` (and optionally `bias`) into `torch_layer` after checking shapes."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def set_block_weights_in_torch(weights, torch_block, hidden_size):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
# intermediate weighs
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def set_model_weights_in_torch(weights, torch_model, hidden_size):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Builds a ReformerModelWithLMHead from `config_file`, loads the trax weights from the pickle, and saves it."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
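    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl \
    #       --config_file ./reformer_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin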
| 306
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCamelCase = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCamelCase = logging.get_logger(__name__)
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[Any] = "maskformer"
__snake_case : Union[str, Any] = {"hidden_size": "mask_feature_size"}
__snake_case : Any = ["resnet", "swin"]
__snake_case : List[Any] = ["detr"]
def __init__( self: Optional[int] , UpperCAmelCase_: int = 256 , UpperCAmelCase_: int = 256 , UpperCAmelCase_: float = 0.1 , UpperCAmelCase_: bool = False , UpperCAmelCase_: Optional[Dict] = None , UpperCAmelCase_: Optional[Dict] = None , UpperCAmelCase_: float = 0.02 , UpperCAmelCase_: float = 1.0 , UpperCAmelCase_: float = 1.0 , UpperCAmelCase_: float = 1.0 , UpperCAmelCase_: float = 20.0 , UpperCAmelCase_: Optional[bool] = None , **UpperCAmelCase_: Optional[int] , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_SCREAMING_SNAKE_CASE = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE = config_class.from_dict(UpperCAmelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_SCREAMING_SNAKE_CASE = DetrConfig()
else:
# verify that the decoder is supported
_SCREAMING_SNAKE_CASE = (
decoder_config.pop("""model_type""" ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'Transformer Decoder {decoder_type} not supported, please use one of'
F' {",".join(self.decoders_supported )}' )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[decoder_type]
_SCREAMING_SNAKE_CASE = config_class.from_dict(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = backbone_config
_SCREAMING_SNAKE_CASE = decoder_config
# main feature dimension for the model
_SCREAMING_SNAKE_CASE = fpn_feature_size
_SCREAMING_SNAKE_CASE = mask_feature_size
# initializer
_SCREAMING_SNAKE_CASE = init_std
_SCREAMING_SNAKE_CASE = init_xavier_std
# Hungarian matcher && loss
_SCREAMING_SNAKE_CASE = cross_entropy_weight
_SCREAMING_SNAKE_CASE = dice_weight
_SCREAMING_SNAKE_CASE = mask_weight
_SCREAMING_SNAKE_CASE = use_auxiliary_loss
_SCREAMING_SNAKE_CASE = no_object_weight
_SCREAMING_SNAKE_CASE = output_auxiliary_logits
_SCREAMING_SNAKE_CASE = self.decoder_config.encoder_attention_heads
_SCREAMING_SNAKE_CASE = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase_ )
@classmethod
def UpperCamelCase ( cls: Union[str, Any] , UpperCAmelCase_: PretrainedConfig , UpperCAmelCase_: PretrainedConfig , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return cls(
backbone_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , **UpperCAmelCase_ , )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
_SCREAMING_SNAKE_CASE = self.decoder_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 306
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = TextToVideoSDPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """np"""
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 306
| 1
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solves X_L = 2 * pi * f * L for whichever of the three arguments is passed as 0."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int=13 , UpperCAmelCase_: Optional[int]=7 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=33 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: List[Any]=5 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Optional[Any]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
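# Note on the two position-id tests above: ESM reuses the RoBERTa convention,
# where real tokens get positions padding_idx + 1, padding_idx + 2, ... and
# padding positions keep padding_idx itself. A minimal sketch of that rule
# (hedged; this mirrors what create_position_ids_from_input_ids is expected to
# compute for the tensors asserted above):
#
#   mask = input_ids.ne(padding_idx).int()
#   position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
#
# For input_ids = [[12, 31, 13, padding_idx]] with padding_idx = 1 this yields
# [[2, 3, 4, 1]], matching the expected_positions tensor built in that test.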
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
| 1
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __UpperCAmelCase :
def __init__( self: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: int=13 , UpperCAmelCase_: List[Any]=7 , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=99 , UpperCAmelCase_: List[Any]=64 , UpperCAmelCase_: Optional[Any]=5 , UpperCAmelCase_: Optional[int]=4 , UpperCAmelCase_: Optional[Any]=64 , UpperCAmelCase_: List[Any]="gelu" , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Optional[int]=512 , UpperCAmelCase_: Dict=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: Tuple=0.02 , UpperCAmelCase_: List[Any]=3 , UpperCAmelCase_: Optional[Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: str ):
'''simple docstring'''
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Dict , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = MPNetForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : str = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__snake_case : Dict = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : str = False
__snake_case : Tuple = True
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ )
@require_torch
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
|
import random
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = num - 1
_SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
_SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
_SCREAMING_SNAKE_CASE = random.randrange(2 ,num - 1 )
_SCREAMING_SNAKE_CASE = pow(snake_case__ ,snake_case__ ,snake_case__ )
if v != 1:
_SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
if num < 2:
return False
_SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
    print('''Prime number:''', num)
    print('''is_prime_low_num:''', is_prime_low_num(num))
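# Hedged usage sketch for the helpers above (rabin_miller, is_prime_low_num and
# generate_large_prime are the names used at their call sites): randrange draws
# from [2**(keysize - 1), 2**keysize), so the result always has exactly
# `keysize` bits and passes five rounds of the probabilistic Miller-Rabin test.
#
#   p = generate_large_prime(keysize=256)
#   assert p.bit_length() == 256
#   assert is_prime_low_num(p)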
| 306
| 1
|
from __future__ import annotations
import math
def __lowerCamelCase ( snake_case__ ) -> list[int]:
"""simple docstring"""
if num <= 0:
_SCREAMING_SNAKE_CASE = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(snake_case__ )
_SCREAMING_SNAKE_CASE = [True] * (num + 1)
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = int(math.sqrt(snake_case__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(snake_case__ )
# Set multiples of start be False
for i in range(start * start ,num + 1 ,snake_case__ ):
if sieve[i] is True:
_SCREAMING_SNAKE_CASE = False
start += 1
for j in range(end + 1 ,num + 1 ):
if sieve[j] is True:
prime.append(snake_case__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
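# Worked example for the sieve above: composites are struck out starting from
# start * start, and the tail loop collects the survivors past sqrt(num), so
# (in its intended, un-obfuscated form) prime_sieve(25) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23]. Expected cost is O(n log log n) time and
# O(n) memory for the boolean sieve.
#
#   assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]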
| 306
|
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
if not isinstance(snake_case__ ,snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_SCREAMING_SNAKE_CASE = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
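# Worked example for the Kernighan-style count above: `number &= number - 1`
# clears the lowest set bit, so the loop body runs once per 1-bit rather than
# once per bit position. For 13 (0b1101):
#
#   13 & 12 -> 12 (0b1100)
#   12 & 11 ->  8 (0b1000)
#    8 &  7 ->  0
#
# i.e. three iterations, so the function returns 3.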
| 306
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[Any] = "encodec"
def __init__( self: Optional[Any] , UpperCAmelCase_: Dict=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCAmelCase_: Optional[Any]=24_000 , UpperCAmelCase_: List[str]=1 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: int=128 , UpperCAmelCase_: Any=32 , UpperCAmelCase_: List[str]=1 , UpperCAmelCase_: Optional[Any]=[8, 5, 4, 2] , UpperCAmelCase_: List[Any]="weight_norm" , UpperCAmelCase_: Union[str, Any]=7 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: int=3 , UpperCAmelCase_: int=2 , UpperCAmelCase_: int=True , UpperCAmelCase_: int="reflect" , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=2 , UpperCAmelCase_: Dict=1.0 , UpperCAmelCase_: int=1_024 , UpperCAmelCase_: str=None , UpperCAmelCase_: str=True , **UpperCAmelCase_: Optional[int] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = target_bandwidths
_SCREAMING_SNAKE_CASE = sampling_rate
_SCREAMING_SNAKE_CASE = audio_channels
_SCREAMING_SNAKE_CASE = normalize
_SCREAMING_SNAKE_CASE = chunk_length_s
_SCREAMING_SNAKE_CASE = overlap
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_filters
_SCREAMING_SNAKE_CASE = num_residual_layers
_SCREAMING_SNAKE_CASE = upsampling_ratios
_SCREAMING_SNAKE_CASE = norm_type
_SCREAMING_SNAKE_CASE = kernel_size
_SCREAMING_SNAKE_CASE = last_kernel_size
_SCREAMING_SNAKE_CASE = residual_kernel_size
_SCREAMING_SNAKE_CASE = dilation_growth_rate
_SCREAMING_SNAKE_CASE = use_causal_conv
_SCREAMING_SNAKE_CASE = pad_mode
_SCREAMING_SNAKE_CASE = compress
_SCREAMING_SNAKE_CASE = num_lstm_layers
_SCREAMING_SNAKE_CASE = trim_right_ratio
_SCREAMING_SNAKE_CASE = codebook_size
_SCREAMING_SNAKE_CASE = codebook_dim if codebook_dim is not None else hidden_size
_SCREAMING_SNAKE_CASE = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**UpperCAmelCase_ )
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
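# Worked example (hedged) for the derived properties above, using the 24 kHz
# defaults: upsampling_ratios = [8, 5, 4, 2] gives hop_length = 8*5*4*2 = 320
# samples per frame, so frame_rate = ceil(24_000 / 320) = 75 frames/second,
# and the last property resolves to int(1_000 * 24.0 // (75 * 10)) = 32
# quantizer codebooks at the top bandwidth.
#
#   import math
#   assert math.ceil(24_000 / (8 * 5 * 4 * 2)) == 75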
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
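# Hedged note on the pattern above: _LazyModule registers the names listed in
# _import_structure without importing anything heavy; the real submodule is
# only imported when an attribute is first accessed, e.g.
#
#   from transformers import AltCLIPModel  # resolves modeling_altclip lazily
#
# and when torch is missing the modeling names are simply never registered, so
# the failure surfaces at first attribute access rather than at package import.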
| 306
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
UpperCamelCase = HUGGINGFACE_HUB_CACHE
UpperCamelCase = '''config.json'''
UpperCamelCase = '''diffusion_pytorch_model.bin'''
UpperCamelCase = '''diffusion_flax_model.msgpack'''
UpperCamelCase = '''model.onnx'''
UpperCamelCase = '''diffusion_pytorch_model.safetensors'''
UpperCamelCase = '''weights.pb'''
UpperCamelCase = '''https://huggingface.co'''
UpperCamelCase = default_cache_path
UpperCamelCase = '''diffusers_modules'''
UpperCamelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
UpperCamelCase = ['''fp16''', '''non-ema''']
UpperCamelCase = '''.self_attn'''
| 306
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
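# Hedged usage sketch of the language-code plumbing above (the obfuscated class
# corresponds to NllbTokenizerFast; the checkpoint name is taken from
# PRETRAINED_VOCAB_FILES_MAP):
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", return_tensors="pt")
#
# set_src_lang_special_tokens then decides whether the "eng_Latn" code token is
# appended after </s> (legacy_behaviour) or prepended before the text.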
| 306
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = '''▁'''
UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = BertGenerationTokenizer
__snake_case : List[str] = False
__snake_case : Tuple = True
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = BertGenerationTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """<s>"""
_SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_002 )
def UpperCamelCase ( self: str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BertGenerationTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [285, 46, 10, 170, 382] , )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """Hello World!"""
_SCREAMING_SNAKE_CASE = [18_536, 2_260, 101]
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )
@slow
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )
@require_torch
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
_SCREAMING_SNAKE_CASE = """ """.join(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(UpperCAmelCase_ , return_tensors="""pt""" , return_token_type_ids=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = BertGenerationConfig()
_SCREAMING_SNAKE_CASE = BertGenerationEncoder(UpperCAmelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase_ )
model(**UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"""input_ids""": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 306
|
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y_points[i]
for i in range(2 ,snake_case__ ):
for j in range(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
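# The routine above is Neville's iterated interpolation: q[j][i] holds the
# value at the query point of the degree-(i-1) polynomial through points
# j-i+1 .. j, and the answer is q[n-1][n-1]. A readable, self-contained sketch
# of the same recurrence (illustrative names; the obfuscated version above is
# equivalent):
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]  # first-order column holds the raw samples
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
# For collinear data the interpolant is exact:
#   neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0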
| 306
| 1
|
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> np.array:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = int(np.ceil((x_end - xa) / step_size ) )
_SCREAMING_SNAKE_CASE = np.zeros((n + 1,) )
_SCREAMING_SNAKE_CASE = ya
_SCREAMING_SNAKE_CASE = xa
for k in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y[k] + step_size * ode_func(snake_case__ ,y[k] )
_SCREAMING_SNAKE_CASE = y[k] + (
(step_size / 2) * (ode_func(snake_case__ ,y[k] ) + ode_func(x + step_size ,snake_case__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
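# The stepper above is Heun's method (explicit trapezoidal rule / improved
# Euler): a forward-Euler predictor followed by an averaged-slope corrector,
# which is second-order accurate in the step size. A self-contained single-step
# sketch under that reading (illustrative names):
def heun_step(ode_func, x, y, step_size):
    """One Heun step for dy/dx = ode_func(x, y)."""
    y_pred = y + step_size * ode_func(x, y)  # Euler predictor
    return y + (step_size / 2) * (ode_func(x, y) + ode_func(x + step_size, y_pred))
# Integrating dy/dx = y from y(0) = 1 up to x = 1 with small steps should
# approach e ~= 2.71828:
#   y, x = 1.0, 0.0
#   while x < 1.0 - 1e-12:
#       y = heun_step(lambda s, t: t, x, y, 0.001)
#       x += 0.001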
| 306
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 1
|
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_SCREAMING_SNAKE_CASE = True
for i in range(snake_case__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_SCREAMING_SNAKE_CASE = True
if a[i].islower():
_SCREAMING_SNAKE_CASE = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
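# Worked example for the DP above (the classic "abbreviation" problem: can the
# characters of `a` be upper-cased or, if lowercase, deleted so that `a`
# becomes `b`?). With a = "daBcd" and b = "ABC": upper-case 'a' and 'c', keep
# 'B', and delete both lowercase 'd's, giving "ABC", so the answer is True.
#
#   abbreviation("daBcd", "ABC")  # -> True (hypothetical readable name)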
| 306
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(snake_case__ ), np.cos(snake_case__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(snake_case__ ), -np.sin(snake_case__ ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(snake_case__ ,snake_case__ )
origins.append(snake_case__ )
xs.append(snake_case__ )
ys.append(snake_case__ )
zs.append(snake_case__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,width=snake_case__ ,height=snake_case__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(snake_case__ )) ,)
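# Hedged note on the pan-camera builder above (create_pan_cameras in its public
# form): it samples 20 yaw angles around a full circle, pointing each camera
# inward and slightly downward (the -0.5 vertical component of z) from a
# radius-4 orbit, and returns a single DifferentiableProjectiveCamera with
# batch shape (1, 20) and a 0.7 rad field of view on both axes.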
| 306
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
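# Hedged usage note: these asserts only exercise the collective ops when
# PartialState sees more than one rank, so the script is meant to be run under
# a distributed launcher, e.g.
#
#   accelerate launch --num_processes 2 path/to/this_script.py
#
# (the reduce_sum / reduce_mean checks above deliberately no-op unless exactly
# two processes are running, since their truth tensors are hard-coded for 2).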
| 306
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
| 306
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
else:
_SCREAMING_SNAKE_CASE = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ProphetNetForConditionalGeneration.from_pretrained(
snake_case__ ,output_loading_info=snake_case__ )
_SCREAMING_SNAKE_CASE = ["""key_proj""", """value_proj""", """query_proj"""]
_SCREAMING_SNAKE_CASE = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
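    # Each key the new model failed to load is resolved attribute-by-attribute against the
    # old checkpoint: names are renamed via `mapping`, and the fused in_proj weight/bias is
    # split into separate query/key/value projections.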
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE = prophet
_SCREAMING_SNAKE_CASE = prophet_old
else:
_SCREAMING_SNAKE_CASE = prophet.prophetnet
_SCREAMING_SNAKE_CASE = prophet_old.model
_SCREAMING_SNAKE_CASE = False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE = mapping[attribute]
if not hasattr(snake_case__ ,snake_case__ ) and len(snake_case__ ) > 0:
_SCREAMING_SNAKE_CASE = attribute
elif hasattr(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.weight
logger.info(F'{attribute} is initialized.' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE = old_model.bias
logger.info(F'{attribute} is initialized' )
_SCREAMING_SNAKE_CASE = True
break
elif attribute in special_keys and hasattr(snake_case__ ,"""in_proj_weight""" ):
_SCREAMING_SNAKE_CASE = old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE = True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE = model[int(snake_case__ )]
_SCREAMING_SNAKE_CASE = old_model[int(snake_case__ )]
else:
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if old_attribute == "":
_SCREAMING_SNAKE_CASE = old_model
else:
if not hasattr(snake_case__ ,snake_case__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
_SCREAMING_SNAKE_CASE = getattr(snake_case__ ,snake_case__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
|
import argparse
import hashlib  # hashlib is only used by the self-test at the bottom of this file
import struct
class __UpperCAmelCase :
def __init__( self: List[str] , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: int , UpperCAmelCase_: List[str] ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
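    # Pre-processing: append the 0x80 byte, zero-pad so the message length is 56 mod 64 bytes,
    # then append the original bit length as a 64-bit big-endian integer (FIPS 180 padding).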
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_SCREAMING_SNAKE_CASE = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" , UpperCAmelCase_ ) ) + [0] * 64
for i in range(16 , 80 ):
_SCREAMING_SNAKE_CASE = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
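    # Compression: each 512-bit block runs the 80-round loop below, cycling through the four
    # round functions (Ch, Parity, Maj, Parity) with their FIPS 180-1 round constants.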
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.padding()
_SCREAMING_SNAKE_CASE = self.split_blocks()
for block in self.blocks:
_SCREAMING_SNAKE_CASE = self.expand_block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
_SCREAMING_SNAKE_CASE = (b & c) | ((~b) & d)
_SCREAMING_SNAKE_CASE = 0x5a_827_999
elif 20 <= i < 40:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0x6e_d9e_ba1
elif 40 <= i < 60:
_SCREAMING_SNAKE_CASE = (b & c) | (b & d) | (c & d)
_SCREAMING_SNAKE_CASE = 0x8f_1bb_cdc
elif 60 <= i < 80:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0xca_62c_1d6
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
self.rotate(UpperCAmelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(UpperCAmelCase_ , 30 ),
c,
d,
)
_SCREAMING_SNAKE_CASE = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = b"""Test String"""
    assert SHAaHash(snake_case__ ).final_hash() == hashlib.sha1(snake_case__ ).hexdigest() # noqa: S324
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
else:
_SCREAMING_SNAKE_CASE = bytes(snake_case__ ,"""utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 306
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """laion/clap-htsat-unfused"""
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
def UpperCamelCase ( self: Optional[Any] , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: List[str] ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE = self.get_feature_extractor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
_SCREAMING_SNAKE_CASE = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_list((3, 1_000) )
_SCREAMING_SNAKE_CASE = feature_extractor(UpperCAmelCase_ , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE = processor(audios=UpperCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """This is a test string"""
_SCREAMING_SNAKE_CASE = processor(text=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_feature_extractor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = ClapProcessor(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 306
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
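    # If the requested add_prefix_space differs from the serialized pre-tokenizer state, the
    # backend pre-tokenizer is rebuilt above; the overrides below then enforce it for
    # pre-tokenized (is_split_into_words) inputs.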
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
| 306
| 1
|
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
return number & 1 == 0
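# A minimal usage sketch (values chosen here for illustration, not from the original module):
#     __lowerCamelCase(4)  # True  -> last bit is 0
#     __lowerCamelCase(7)  # False -> last bit is 1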
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 306
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '''▁'''
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
UpperCamelCase = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = ["input_ids", "attention_mask"]
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Any=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: Optional[int]="</s>" , UpperCAmelCase_: List[str]="</s>" , UpperCAmelCase_: Dict="<s>" , UpperCAmelCase_: List[str]="<unk>" , UpperCAmelCase_: str="<pad>" , UpperCAmelCase_: Union[str, Any]="<mask>" , UpperCAmelCase_: Optional[Dict[str, Any]] = None , **UpperCAmelCase_: str , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_SCREAMING_SNAKE_CASE = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = len(self.sp_model )
_SCREAMING_SNAKE_CASE = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.lang_code_to_id.items()}
_SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """en_XX"""
_SCREAMING_SNAKE_CASE = self.lang_code_to_id[self._src_lang]
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self: str , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def UpperCamelCase ( self: Any , UpperCAmelCase_: str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self: str , UpperCAmelCase_: int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
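    # MBart-50 input format: [src_lang_code] X [eos]; the prefix/suffix token lists are
    # populated by set_src_lang_special_tokens / set_tgt_lang_special_tokens further down.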
def UpperCamelCase ( self: int , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Any , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: List[str] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "en_XX" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "ro_RO" , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.lang_code_to_id[src_lang]
_SCREAMING_SNAKE_CASE = [self.cur_lang_code_id]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.lang_code_to_id[tgt_lang]
_SCREAMING_SNAKE_CASE = [self.cur_lang_code_id]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
| 306
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
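# With n processes, rank r builds [r*n + 1, ..., r*n + n], so gathering across all ranks
# yields exactly 1..n**2 in order (checked by the gather test below).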
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
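# Non-main ranks are one element short, so pad_across_processes right-pads them with zeros
# to the longest length (num_processes + 1, held by the main process).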
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
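# With exactly 2 processes, rank 0 holds [1., 2.] and rank 1 holds [3., 4.]; the "sum"
# reduction gives [4., 6.] and the "mean" reduction (tested below) gives [2., 3.].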
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
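# This module is meant to be run under a distributed launcher, e.g. (hypothetical file
# name): `accelerate launch --num_processes 2 test_ops.py`; the reduce tests above only
# execute in the 2-process case.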
| 306
| 1
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCAmelCase (unittest.TestCase ):
__snake_case : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
_SCREAMING_SNAKE_CASE = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
_SCREAMING_SNAKE_CASE = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def UpperCamelCase ( self: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: str ):
'''simple docstring'''
for example in examples:
_SCREAMING_SNAKE_CASE = video_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{"""score""": ANY(UpperCAmelCase_ ), """label""": ANY(UpperCAmelCase_ )},
{"""score""": ANY(UpperCAmelCase_ ), """label""": ANY(UpperCAmelCase_ )},
] , )
@require_torch
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
_SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
_SCREAMING_SNAKE_CASE = pipeline(
"""video-classification""" , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
_SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
_SCREAMING_SNAKE_CASE = video_classifier(UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}] , )
_SCREAMING_SNAKE_CASE = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}],
[{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}],
] , )
@require_tf
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
| 306
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __lowerCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [randint(-10_00 ,10_00 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE = randint(-50_00 ,50_00 )
return (arr, r)
UpperCamelCase = make_dataset()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(snake_case__ ,3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
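# The permutation-based search above is O(n^3); the sorted two-pointer scan below costs one
# O(n log n) sort plus an O(n^2) sweep, which the timing harness at the end compares.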
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __lowerCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum1(*dataset)
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum2(*dataset)
"""
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 306
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = VQModel
__snake_case : Tuple = "sample"
@property
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Optional[Any]=(32, 32) ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
return {"sample": image}
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return (3, 32, 32)
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return (3, 32, 32)
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
_SCREAMING_SNAKE_CASE = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCAmelCase_ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_SCREAMING_SNAKE_CASE = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase_ )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ ).sample
_SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_SCREAMING_SNAKE_CASE = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
| 306
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
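    # Kwargs are routed per pipeline stage: point/crop settings drive preprocessing, while
    # the IoU, stability-score, and NMS thresholds shape postprocessing.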
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
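# Added note: gnome sort is O(n**2) in the worst case but a single O(n) pass
# on already-sorted input, because the index only steps backwards after a
# swap. Example: gnome_sort([3, 1, 2]) -> [1, 2, 3].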
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Convert an old-structure ProphetNet checkpoint into the current model layout."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
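# Illustration (added): the old checkpoints store query/key/value stacked in
# one `in_proj_weight` of shape (3 * embed_dim, embed_dim); the special-keys
# branch above slices it into thirds:
#
#   query = in_proj_weight[:embed_dim, :]
#   key   = in_proj_weight[embed_dim : 2 * embed_dim, :]
#   value = in_proj_weight[2 * embed_dim :, :]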
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'could not parse string as bool {string}' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
UpperCamelCase = parser.parse_args()
UpperCamelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
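# Example invocation (added; script and file names are hypothetical):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny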
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return the indices of the two entries of sorted ``nums`` that sum to ``target``.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 100)
    []
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
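# Minimal usage sketch (added; assumes the transformers library exposes this
# config as VivitConfig):
#
#   config = VivitConfig(num_frames=16, hidden_size=512)
#   assert config.model_type == "vivit"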
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF checkpoint variable names to PyTorch model parameters."""
_SCREAMING_SNAKE_CASE = {}
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = model.mobilenet_va
else:
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = """MobilenetV1/Conv2d_0/"""
_SCREAMING_SNAKE_CASE = backbone.conv_stem.convolution.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.bias
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.weight
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_mean
_SCREAMING_SNAKE_CASE = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = i * 2
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
_SCREAMING_SNAKE_CASE = backbone.layer[pt_index + 1]
_SCREAMING_SNAKE_CASE = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_SCREAMING_SNAKE_CASE = pointer.convolution.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.bias
_SCREAMING_SNAKE_CASE = pointer.normalization.weight
_SCREAMING_SNAKE_CASE = pointer.normalization.running_mean
_SCREAMING_SNAKE_CASE = pointer.normalization.running_var
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
_SCREAMING_SNAKE_CASE = model.classifier.weight
_SCREAMING_SNAKE_CASE = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model."""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
_SCREAMING_SNAKE_CASE = tf.train.list_variables(snake_case__ )
_SCREAMING_SNAKE_CASE = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
_SCREAMING_SNAKE_CASE = tf.train.load_variable(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = array
# Build TF to PyTorch weights loading map
_SCREAMING_SNAKE_CASE = _build_tf_to_pytorch_map(snake_case__ ,snake_case__ ,snake_case__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
_SCREAMING_SNAKE_CASE = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
_SCREAMING_SNAKE_CASE = array.squeeze().transpose()
else:
_SCREAMING_SNAKE_CASE = np.transpose(snake_case__ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
_SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
tf_weights.pop(snake_case__ ,snake_case__ )
tf_weights.pop(name + """/RMSProp""" ,snake_case__ )
tf_weights.pop(name + """/RMSProp_1""" ,snake_case__ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,snake_case__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to the input of a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
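# Worked example (added): for in_height = 7, stride = 2, kernel = 3 the code
# above gives pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top = 1
# and pad_bottom = 1, reproducing TensorFlow's asymmetric "SAME" padding that
# nn.Conv2d cannot express directly.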
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = MobileNetVaConfig
__snake_case : Any = load_tf_weights_in_mobilenet_va
__snake_case : Any = "mobilenet_v1"
__snake_case : List[Any] = "pixel_values"
__snake_case : Any = False
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
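# Usage sketch (added): in the upstream transformers library this head is
# exposed as MobileNetV1ForImageClassification; assuming that name, a minimal
# call looks like:
#
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(pixel_values=torch.randn(1, 3, 224, 224)).logits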
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Evaluate the interpolating polynomial through the given points at ``x0``
    using Neville's iterated-interpolation scheme; returns the value and the
    full table of intermediate results.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
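# Added check (not in the original file): with four samples of y = x**2 the
# scheme above reproduces the quadratic exactly at a new query point.
if __name__ == "__main__":
    value, _table = neville_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 2.5)
    assert abs(value - 6.25) < 1e-9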
def merge_sort(collection: list) -> list:
    """Return a sorted copy of ``collection`` using top-down merge sort."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
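# Design note (added): the nested `_merge` generator pops from the front of
# each list, and ties take the left element first, which keeps the sort
# stable; pop(0) is O(n), so this version favours clarity over raw speed.
# Example: merge_sort([5, 1, 4, 2, 3]) -> [1, 2, 3, 4, 5].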
def get_set_bits_count(number: int) -> int:
    """Count the set bits of ``number`` with Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so the loop runs
        # once per set bit instead of once per bit position.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
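# Worked example (added): for number = 0b1011 the loop clears bits in the
# order 0b1010 -> 0b1000 -> 0b0000, i.e. exactly three iterations, so
# get_set_bits_count(0b1011) == 3.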
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a ragged batch of sequences to ``sequence_length`` with ``padding_value``.

    A tuple ``padding_value`` pads pairs of values (e.g. entity spans).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
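# Worked example (added): padding a ragged batch of tag sequences on the
# right with -1 up to sequence_length = 4:
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]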
def is_punctuation(char: str) -> bool:
    """Check whether ``char`` is a punctuation character."""
    cp = ord(char)
    # The ASCII ranges !-/ :-@ [-` {-~ are treated as punctuation even though
    # unicodedata classifies some of them (e.g. "^", "$") differently.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Tensor conversion has to wait until the labels are padded to a
            # common length below.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = (32, 32)
_SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase_ )
return image
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(UpperCAmelCase_ )
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
def extract(*UpperCAmelCase_: Dict , **UpperCAmelCase_: Tuple ):
class __UpperCAmelCase :
def __init__( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.ones([0] )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
self.pixel_values.to(UpperCAmelCase_ )
return self
return Out()
return extract
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.dummy_cond_unet
_SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.dummy_vae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_SCREAMING_SNAKE_CASE = 77
_SCREAMING_SNAKE_CASE = self.dummy_image.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        _SCREAMING_SNAKE_CASE = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_cond_unet
_SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.dummy_vae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_SCREAMING_SNAKE_CASE = 77
_SCREAMING_SNAKE_CASE = self.dummy_image.to(UpperCAmelCase_ )
# put models in fp16
_SCREAMING_SNAKE_CASE = unet.half()
_SCREAMING_SNAKE_CASE = vae.half()
_SCREAMING_SNAKE_CASE = bert.half()
# make sure here that pndm scheduler skips prk
        _SCREAMING_SNAKE_CASE = AltDiffusionImg2ImgPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
_SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = alt_pipe(
[prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""np""" , image=UpperCAmelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_SCREAMING_SNAKE_CASE = init_image.resize((760, 504) )
_SCREAMING_SNAKE_CASE = """BAAI/AltDiffusion"""
        _SCREAMING_SNAKE_CASE = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase_ , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
_SCREAMING_SNAKE_CASE = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_SCREAMING_SNAKE_CASE = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_SCREAMING_SNAKE_CASE = init_image.resize((768, 512) )
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_SCREAMING_SNAKE_CASE = """BAAI/AltDiffusion"""
        _SCREAMING_SNAKE_CASE = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
_SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCAmelCase_ , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Set one layer's weight (and optional bias) after a shape check."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
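# Note (added): set_param mutates the target layer in place, so the callers
# below must pass tensors already transposed/reshaped into the PyTorch layout;
# the asserts only catch shape mismatches, not layout mistakes.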
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH self-attention weights into the given PyTorch layer."""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local self-attention weights into the given PyTorch layer."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax block's weights into the corresponding PyTorch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy all trax model weights into the PyTorch Reformer model."""
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a PyTorch Reformer from a trax pickle and save its state dict."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    """Sum every parameter value, skipping the codebook embeddings."""
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder keys into the FLAVA image-codebook layout."""
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy a DALL-E encoder checkpoint into a FLAVA image codebook and verify it."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
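# Worked example (added): rreplace splits from the right, so only the final
# suffix is rewritten:
#
#   rreplace("group_1.res_path.w", ".w", ".weight", 1)
#   -> "group_1.res_path.weight"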
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = TextToVideoSDPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """np"""
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
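# Hedged example (not part of the test suite above): a minimal sketch of driving
# the pipeline under test directly, using the imports at the top of this file.
# The checkpoint id is the one used in the slow tests; how `.frames` comes back
# varies across diffusers versions.
def _demo_text_to_video():
    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    generator = torch.Generator(device="cpu").manual_seed(0)
    # two steps keeps the sketch fast; the slow test above uses 25
    frames = pipe("Spiderman is surfing", generator=generator, num_inference_steps=2).frames
    print(len(frames), "frames generated")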
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = "microsoft/speecht5_tts"
__snake_case : List[str] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
__snake_case : Optional[int] = "text_reader"
__snake_case : List[str] = SpeechTaProcessor
__snake_case : Union[str, Any] = SpeechTaForTextToSpeech
__snake_case : Tuple = SpeechTaHifiGan
__snake_case : List[Any] = ["text"]
__snake_case : Dict = ["audio"]
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
if self.post_processor is None:
_SCREAMING_SNAKE_CASE = """microsoft/speecht5_hifigan"""
super().setup()
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Tuple=None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.pre_processor(text=UpperCAmelCase_ , return_tensors="""pt""" , truncation=UpperCAmelCase_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_SCREAMING_SNAKE_CASE = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_SCREAMING_SNAKE_CASE = torch.tensor(embeddings_dataset[7_305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**UpperCAmelCase_ )
def UpperCamelCase ( self: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(UpperCAmelCase_ ).cpu().detach()
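# Hedged end-to-end sketch using the same checkpoints and x-vector dataset as the
# tool above (SpeechTa* are this file's names for the SpeechT5 classes). It is
# written against the plain model API because the tool's collapsed method names
# are hard to call directly; it assumes `datasets` is installed.
def _demo_speechta_tts(text="Hello world"):
    processor = SpeechTaProcessor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechTaForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechTaHifiGan.from_pretrained("microsoft/speecht5_hifigan")
    inputs = processor(text=text, return_tensors="pt")
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)
    with torch.no_grad():
        return model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)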
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int=13 , UpperCAmelCase_: Optional[int]=7 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=33 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: List[Any]=5 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Optional[Any]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
            _SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
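# Hedged illustration of the invariant the position-id tests above encode:
# ESM/RoBERTa-style position ids count up from padding_idx + 1 and keep padding
# positions pinned at padding_idx. This mirrors the expected tensor built in the
# test, without depending on the model classes.
def _demo_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

# _demo_position_ids(torch.tensor([[12, 31, 13, 1]]), padding_idx=1)
# -> tensor([[2, 3, 4, 1]]), matching the expected positions in the test above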
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if hor == 1_28:
_SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_SCREAMING_SNAKE_CASE = (32, 1_28, 2_56)
_SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
_SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
_SCREAMING_SNAKE_CASE = (32, 64, 1_28, 2_56)
_SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
_SCREAMING_SNAKE_CASE = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
_SCREAMING_SNAKE_CASE = model.state_dict()
_SCREAMING_SNAKE_CASE = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_55_36,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
_SCREAMING_SNAKE_CASE = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
_SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() ,F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' ,"""w""" ) as f:
json.dump(snake_case__ ,snake_case__ )
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 1_28, 2_56),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_55_36,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
_SCREAMING_SNAKE_CASE = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
_SCREAMING_SNAKE_CASE = model
_SCREAMING_SNAKE_CASE = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
_SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() ,"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" ,"""w""" ) as f:
json.dump(snake_case__ ,snake_case__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
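# Hedged follow-up (not in the original script): reloading the dumped weights
# through diffusers once the conversion above has run. `UNetaDModel` is this
# file's name for diffusers' UNet1DModel; the paths are the ones written above.
def _demo_reload(hor=32):
    unet = UNetaDModel.from_pretrained(f"hub/hopper-medium-v2/unet/hor{hor}")
    value_function = UNetaDModel.from_pretrained("hub/hopper-medium-v2/value_function")
    print(sum(p.numel() for p in unet.parameters()), "unet parameters")
    print(sum(p.numel() for p in value_function.parameters()), "value-function parameters")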
import random
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = num - 1
_SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
_SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
_SCREAMING_SNAKE_CASE = random.randrange(2 ,num - 1 )
_SCREAMING_SNAKE_CASE = pow(snake_case__ ,snake_case__ ,snake_case__ )
if v != 1:
_SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
if num < 2:
return False
_SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
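# Hedged, self-contained restatement of the Miller-Rabin witness loop above; the
# collapsed function names in this file shadow one another, so this sketch
# re-derives the test instead of calling them.
def _is_probable_prime(num, rounds=5):
    if num < 2:
        return False
    if num < 4:  # 2 and 3 are prime
        return True
    if num % 2 == 0:
        return False
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False
    return True

assert _is_probable_prime(997) and not _is_probable_prime(1_000)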
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __UpperCAmelCase (_UpperCAmelCase ):
def UpperCamelCase ( self: Any , UpperCAmelCase_: str ):
'''simple docstring'''
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
_SCREAMING_SNAKE_CASE = re.compile(R"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
_SCREAMING_SNAKE_CASE = input_file.read()
_SCREAMING_SNAKE_CASE = regexp.search(UpperCAmelCase_ )
return match
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
_SCREAMING_SNAKE_CASE = re.compile(R"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
_SCREAMING_SNAKE_CASE = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_SCREAMING_SNAKE_CASE = regexp.finditer(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Path("""./datasets""" )
_SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase_ ) ):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Path("""./datasets""" )
_SCREAMING_SNAKE_CASE = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase_ ) ):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
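# Hedged illustration of the first pattern above: it flags `open(...)` calls that
# pass neither an `encoding=` argument nor one of the binary/write mode tokens.
_no_encoding_pattern = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
# bool(_no_encoding_pattern.search(' open("data.txt")'))                    -> True (flagged)
# bool(_no_encoding_pattern.search(' open("data.txt", encoding="utf-8")'))  -> False (passes)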
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
if not isinstance(snake_case__ ,snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_SCREAMING_SNAKE_CASE = 0
while number:
        # `number &= number - 1` clears the lowest set bit, so the loop runs
        # once per set bit rather than once per bit position (Kernighan's trick).
number &= number - 1
count += 1
return count
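# Worked example: for 11 == 0b1011, `number &= number - 1` walks
# 0b1011 -> 0b1010 -> 0b1000 -> 0b0000, so the function returns 3.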
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[Any] = "altclip_text_model"
def __init__( self: Optional[int] , UpperCAmelCase_: Union[str, Any]=250_002 , UpperCAmelCase_: List[Any]=1_024 , UpperCAmelCase_: Tuple=24 , UpperCAmelCase_: str=16 , UpperCAmelCase_: Any=4_096 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Union[str, Any]=0.1 , UpperCAmelCase_: str=514 , UpperCAmelCase_: Any=1 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Dict=0.02 , UpperCAmelCase_: Union[str, Any]=1E-05 , UpperCAmelCase_: str=1 , UpperCAmelCase_: Dict=0 , UpperCAmelCase_: str=2 , UpperCAmelCase_: str="absolute" , UpperCAmelCase_: Any=True , UpperCAmelCase_: int=768 , **UpperCAmelCase_: Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = project_dim
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : str = "altclip_vision_model"
def __init__( self: List[str] , UpperCAmelCase_: int=768 , UpperCAmelCase_: List[Any]=3_072 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Optional[Any]=12 , UpperCAmelCase_: List[str]=12 , UpperCAmelCase_: int=3 , UpperCAmelCase_: Any=224 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: Optional[int]="quick_gelu" , UpperCAmelCase_: Union[str, Any]=1E-5 , UpperCAmelCase_: Any=0.0 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: Union[str, Any]=1.0 , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = projection_dim
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def UpperCamelCase ( cls: List[Any] , UpperCAmelCase_: Union[str, os.PathLike] , **UpperCAmelCase_: Any ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
_SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Optional[int] = "altclip"
__snake_case : str = True
def __init__( self: Any , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: Tuple=768 , UpperCAmelCase_: str=2.65_92 , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.pop("""text_config_dict""" , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = kwargs.pop("""vision_config_dict""" , UpperCAmelCase_ )
super().__init__(**UpperCAmelCase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
# This is the complete result when using `text_config_dict`.
_SCREAMING_SNAKE_CASE = AltCLIPTextConfig(**UpperCAmelCase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_SCREAMING_SNAKE_CASE = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
_SCREAMING_SNAKE_CASE = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(UpperCAmelCase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
# This is the complete result when using `vision_config_dict`.
_SCREAMING_SNAKE_CASE = AltCLIPVisionConfig(**UpperCAmelCase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_SCREAMING_SNAKE_CASE = {
str(UpperCAmelCase_ ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_SCREAMING_SNAKE_CASE = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
_SCREAMING_SNAKE_CASE = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(UpperCAmelCase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
_SCREAMING_SNAKE_CASE = AltCLIPTextConfig(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = AltCLIPVisionConfig(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = projection_dim
_SCREAMING_SNAKE_CASE = logit_scale_init_value
_SCREAMING_SNAKE_CASE = 1.0
@classmethod
def UpperCamelCase ( cls: Dict , UpperCAmelCase_: AltCLIPTextConfig , UpperCAmelCase_: AltCLIPVisionConfig , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
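# Hedged usage sketch: the three classes above correspond to AltCLIPTextConfig,
# AltCLIPVisionConfig and AltCLIPConfig in transformers; a composite config can
# be built from the two sub-configs via the classmethod defined above.
#
#   from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig
#   config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
#   config.to_dict()["model_type"]  # "altclip"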
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Any = "roberta"
def __init__( self: Union[str, Any] , UpperCAmelCase_: List[str]=50_265 , UpperCAmelCase_: Any=768 , UpperCAmelCase_: Optional[int]=12 , UpperCAmelCase_: Union[str, Any]=12 , UpperCAmelCase_: Optional[int]=3_072 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Any=512 , UpperCAmelCase_: str=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Optional[Any]=1E-12 , UpperCAmelCase_: Union[str, Any]=1 , UpperCAmelCase_: int=0 , UpperCAmelCase_: Union[str, Any]=2 , UpperCAmelCase_: List[str]="absolute" , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: str=None , **UpperCAmelCase_: Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = classifier_dropout
class __UpperCAmelCase (_UpperCAmelCase ):
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
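# Hedged usage sketch of the ONNX config above (exported as RobertaOnnxConfig);
# the exact import path is an assumption about the surrounding package layout.
#
#   from transformers import RobertaConfig
#   from transformers.models.roberta import RobertaOnnxConfig
#   RobertaOnnxConfig(RobertaConfig(), task="sequence-classification").inputs
#   # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #              ("attention_mask", {0: "batch", 1: "sequence"})])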
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
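# Hedged usage sketch for the fast tokenizer above (checkpoint name taken from
# the pretrained map above):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("How are you?", return_tensors="pt")   # "eng_Latn ... </s>" with default behaviour
#   tok.convert_tokens_to_ids("fra_Latn")              # id to force as the target language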
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = "dandelin/vilt-b32-finetuned-vqa"
__snake_case : List[str] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__snake_case : Tuple = "image_qa"
__snake_case : Optional[Any] = AutoProcessor
__snake_case : Optional[Any] = AutoModelForVisualQuestionAnswering
__snake_case : Any = ["image", "text"]
__snake_case : List[str] = ["text"]
def __init__( self: str , *UpperCAmelCase_: Optional[Any] , **UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: "Image" , UpperCAmelCase_: str ):
'''simple docstring'''
return self.pre_processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
with torch.no_grad():
return self.model(**UpperCAmelCase_ ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
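# Hedged equivalent of the tool above via the plain pipeline API (same default
# checkpoint; the image path is illustrative):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="photo.jpg", question="What is in the image?")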
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y_points[i]
for i in range(2 ,snake_case__ ):
for j in range(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
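# Hedged, de-obfuscated restatement of the routine above: it is Neville's
# iterated-interpolation scheme, and the collapsed parameter names make the
# original hard to run as written.
def _neville_interpolate(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

assert abs(_neville_interpolate([1, 2, 3], [1, 4, 9], 2.0) - 4.0) < 1e-9  # y = x**2 at x0 = 2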
if __name__ == "__main__":
import doctest
doctest.testmod()
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
UpperCamelCase = '''Usage of script: script_name <size_of_canvas:int>'''
UpperCamelCase = [0] * 100 + [1] * 10
random.shuffle(choice)
def __lowerCamelCase ( snake_case__ ) -> list[list[bool]]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [[False for i in range(snake_case__ )] for j in range(snake_case__ )]
return canvas
def __lowerCamelCase ( snake_case__ ) -> None:
"""simple docstring"""
for i, row in enumerate(snake_case__ ):
for j, _ in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = bool(random.getrandbits(1 ) )
def __lowerCamelCase ( snake_case__ ) -> list[list[bool]]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.array(snake_case__ )
_SCREAMING_SNAKE_CASE = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(snake_case__ ):
for c, pt in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = __judge_point(
snake_case__ ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_SCREAMING_SNAKE_CASE = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_SCREAMING_SNAKE_CASE = current_canvas.tolist()
return return_canvas
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_SCREAMING_SNAKE_CASE = pt
if pt:
if alive < 2:
_SCREAMING_SNAKE_CASE = False
elif alive == 2 or alive == 3:
_SCREAMING_SNAKE_CASE = True
elif alive > 3:
_SCREAMING_SNAKE_CASE = False
else:
if alive == 3:
_SCREAMING_SNAKE_CASE = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
UpperCamelCase = int(sys.argv[1])
# main working structure of this module.
UpperCamelCase = create_canvas(canvas_size)
seed(c)
UpperCamelCase , UpperCamelCase = plt.subplots()
fig.show()
UpperCamelCase = ListedColormap(['''w''', '''k'''])
try:
while True:
UpperCamelCase = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
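# Hedged restatement of the survival rule implemented above (Conway's Game of
# Life): a live cell survives with two or three live neighbours; a dead cell
# becomes alive with exactly three.
def _next_state(is_alive, alive_neighbours):
    return alive_neighbours == 3 or (is_alive and alive_neighbours == 2)

assert _next_state(False, 3) and _next_state(True, 2) and not _next_state(True, 4)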
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Note: these classes live in the Flax modeling file, not the TF one.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
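# A minimal sketch of the same lazy-import idea using PEP 562 module
# __getattr__, independent of transformers' _LazyModule helper. The function
# name and structure below are illustrative assumptions, not upstream API.
import importlib
import types


def make_lazy_module(name: str, import_structure: dict) -> types.ModuleType:
    """Build a module object that only imports a submodule on first attribute access."""
    attr_to_submodule = {attr: sub for sub, attrs in import_structure.items() for attr in attrs}
    module = types.ModuleType(name)

    def __getattr__(attr: str):
        if attr not in attr_to_submodule:
            raise AttributeError(f"module {name!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{name}.{attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(module, attr, value)  # cache so later lookups bypass __getattr__
        return value

    module.__getattr__ = __getattr__  # honored by CPython's module lookup (PEP 562)
    return module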
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list; inputs shorter than MIN_NUM_TOKENS are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters and return the set of tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
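# Quick sanity check for the two helpers above (datasketch's MinHash.jaccard
# returns an estimate of the true Jaccard similarity). Inputs with fewer than
# MIN_NUM_TOKENS distinct tokens are skipped, hence the None guard:
#
#   tokens_a = get_tokens("def process_all_records(records, limit):\n    results = [normalize_record(r) for r in records[:limit]]\n    return results")
#   tokens_b = get_tokens("def process_all_records(items, limit):\n    results = [normalize_record(i) for i in items[:limit]]\n    return results")
#   mh_a, mh_b = get_min_hash(list(tokens_a)), get_min_hash(list(tokens_b))
#   if mh_a is not None and mh_b is not None:
#       print(mh_a.jaccard(mh_b))  # close to len(tokens_a & tokens_b) / len(tokens_a | tokens_b)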
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index; if it is close to an existing key, record it in that key's cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where each Cluster is a List[Dict] describing the files.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Exact Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters with extremeness and copy counts
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
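# Hedged usage sketch for the full pipeline above (requires `datasets` and
# `datasketch`; the toy columns mirror the fields the functions index):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({
#       "content": ["def f(a):\n    return a + 1", "def f(x):\n    return x + 1", "print('hi')"],
#       "repo_name": ["repo1", "repo2", "repo3"],
#       "path": ["a.py", "b.py", "c.py"],
#   })
#   ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   # ds_filtered drops near-duplicates; `clusters` records what was grouped together.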
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """An object describing how a camera corresponds to pixels in an image."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return the (x, y) pixel coordinates, shape (width * height, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        # Map pixel coordinates to [-1, 1], then scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
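# Hedged usage sketch (mirrors similar shap-e/diffusers code; `camera_rays`
# is the property defined above):
#
#   cameras = create_pan_cameras(64)   # 20 cameras panning around the origin
#   rays = cameras.camera_rays         # shape [1, 20 * 64 * 64, 2, 3]
#   origins, directions = rays[..., 0, :], rays[..., 1, :]  # per-pixel ray origin and unit direction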
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=7 , UpperCAmelCase_: List[Any]=3 , UpperCAmelCase_: Any=30 , UpperCAmelCase_: str=400 , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: List[Any]=None , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: Optional[int]=1 / 255 , UpperCAmelCase_: int=True , UpperCAmelCase_: Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_: Any=[0.5, 0.5, 0.5] , UpperCAmelCase_: List[Any]=True , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333}
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = do_pad
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Any , UpperCAmelCase_: Any=False ):
'''simple docstring'''
if not batched:
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w )
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
elif w > h:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h )
else:
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
_SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""]
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = DetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = DetrImageProcessingTester(self )
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_rescale""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_pad""" ) )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""image_id""": 39_769, """annotations""": target}
# encode them
_SCREAMING_SNAKE_CASE = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
_SCREAMING_SNAKE_CASE = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
@slow
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_SCREAMING_SNAKE_CASE = json.loads(f.read() )
_SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
_SCREAMING_SNAKE_CASE = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_SCREAMING_SNAKE_CASE = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
_SCREAMING_SNAKE_CASE = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
_SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
_SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
_SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify masks
_SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase_ )
# verify orig_size
_SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
_SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
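# Hedged inference sketch mirroring the integration test above:
#
#   from transformers import DistilBertTokenizer, FlaxDistilBertModel
#   tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#   model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
#   outputs = model(**tokenizer("Hello world", return_tensors="np"))
#   print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)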
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import hashlib  # hashlib is only used inside the test function
import struct
class SHA1Hash:
    """Pure-Python SHA-1 that mirrors hashlib.sha1 for bytes input."""

    def __init__(self, data: bytes):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Rotate a 32-bit integer n left by b bits (note the full 32-bit mask).
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # Append 0x80, zero bytes, then the 8-byte big-endian bit length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    """Cross-check this implementation against hashlib on a sample input."""
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main() -> None:
    """Hash a string or the contents of a file given on the command line."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case the hash input should be a bytestring.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = "lxmert"
__snake_case : Tuple = {}
def __init__( self: Tuple , UpperCAmelCase_: Union[str, Any]=30_522 , UpperCAmelCase_: str=768 , UpperCAmelCase_: Optional[int]=12 , UpperCAmelCase_: str=9_500 , UpperCAmelCase_: Optional[int]=1_600 , UpperCAmelCase_: Any=400 , UpperCAmelCase_: Dict=3_072 , UpperCAmelCase_: Tuple="gelu" , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Union[str, Any]=512 , UpperCAmelCase_: str=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: List[str]=1E-12 , UpperCAmelCase_: Tuple=9 , UpperCAmelCase_: Union[str, Any]=5 , UpperCAmelCase_: Union[str, Any]=5 , UpperCAmelCase_: Dict=2_048 , UpperCAmelCase_: List[Any]=4 , UpperCAmelCase_: int=6.67 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: str=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Dict=True , **UpperCAmelCase_: Any , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = num_qa_labels
_SCREAMING_SNAKE_CASE = num_object_labels
_SCREAMING_SNAKE_CASE = num_attr_labels
_SCREAMING_SNAKE_CASE = l_layers
_SCREAMING_SNAKE_CASE = x_layers
_SCREAMING_SNAKE_CASE = r_layers
_SCREAMING_SNAKE_CASE = visual_feat_dim
_SCREAMING_SNAKE_CASE = visual_pos_dim
_SCREAMING_SNAKE_CASE = visual_loss_normalizer
_SCREAMING_SNAKE_CASE = task_matched
_SCREAMING_SNAKE_CASE = task_mask_lm
_SCREAMING_SNAKE_CASE = task_obj_predict
_SCREAMING_SNAKE_CASE = task_qa
_SCREAMING_SNAKE_CASE = visual_obj_loss
_SCREAMING_SNAKE_CASE = visual_attr_loss
_SCREAMING_SNAKE_CASE = visual_feat_loss
_SCREAMING_SNAKE_CASE = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
super().__init__(**UpperCAmelCase_ )
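# Hedged usage sketch (the attribute name follows the upstream LxmertConfig
# and is not verified against this exact copy):
#
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # {"vision": 5, "cross_encoder": 5, "language": 9}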
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
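# Hedged usage sketch: with byte-level BPE, a leading space changes the token
# ids, which is what `add_prefix_space` controls for pre-tokenized inputs.
#
#   from transformers import BloomTokenizerFast
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tok("hello")["input_ids"]   # ids for "hello"
#   tok(" hello")["input_ids"]  # different ids: the leading space is part of the token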
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = BlenderbotSmallTokenizer
__snake_case : List[Any] = False
def UpperCamelCase ( self: str ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
_SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
_SCREAMING_SNAKE_CASE = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Any ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """adapt act apte"""
_SCREAMING_SNAKE_CASE = """adapt act apte"""
return input_text, output_text
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_SCREAMING_SNAKE_CASE = """adapt act apte"""
_SCREAMING_SNAKE_CASE = ["""adapt""", """act""", """ap@@""", """te"""]
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_SCREAMING_SNAKE_CASE = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [1_384]
_SCREAMING_SNAKE_CASE = """I am a small frog."""
_SCREAMING_SNAKE_CASE = tok([src_text] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["""input_ids"""]
_SCREAMING_SNAKE_CASE = tok.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
_SCREAMING_SNAKE_CASE = """I am a small frog ."""
_SCREAMING_SNAKE_CASE = """."""
_SCREAMING_SNAKE_CASE = tok(UpperCAmelCase_ )["""input_ids"""]
_SCREAMING_SNAKE_CASE = tok(UpperCAmelCase_ )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
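# Worked example of the toy BPE above: "apte" is not in the vocab, so it is
# split using the merges "a p" and "t e</w>" into the pieces "ap@@" and "te",
# where "@@" marks a non-final subword in the BlenderbotSmall convention;
# this is exactly what the tokenization test asserts.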
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive
    integer, obtained by inverting n = k(3k - 1) / 2."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the smallest difference D = P_j - P_i such that both the sum and
    the difference of the pentagonal numbers P_i and P_j are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
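# Worked check of the inverse-pentagonal test above: for 22 = P_4,
# (1 + sqrt(1 + 24 * 22)) / 6 = (1 + 23) / 6 = 4, an integer, so 22 is
# pentagonal; for 23, (1 + sqrt(553)) / 6 ≈ 4.09 is not an integer.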
if __name__ == "__main__":
print(f"{solution() = }")
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process gets a tensor with one extra element, so padding is
    # required on all the other processes.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now this test only runs on exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now this test only runs on exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
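# This script is meant to run under several processes at once. A minimal launch
# sketch (the command follows standard `accelerate` usage and is an assumption,
# not taken from this file):
#
#   accelerate launch --num_processes 2 test_ops.py
#
# Note that the reduce tests above deliberately no-op unless exactly two
# processes are used, since the truth tensors are hard-coded for that case.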
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of `arr` until one sums to `target`."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers: O(n^2) instead of the O(n^3) brute force."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """
    A RAG retriever distributed across Ray actors: each handle in
    `retrieval_workers` holds its own copy of the retriever, so retrieval calls
    can be load-balanced across processes during fine-tuning.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
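# Sketch of how this retriever is typically wired up with Ray (the checkpoint
# name and worker count are placeholders; the actor pattern follows the standard
# Ray `remote` API):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()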
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
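# Example invocation (the script filename and all paths are placeholders shown
# only to illustrate the CLI, not taken from this file):
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --dict_path /path/to/dict.ltr.txt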
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak ProphetNet's weights into the transformers structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old attention stores q/k/v as one fused in_proj matrix; slice it apart.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    # mapped attribute is empty (e.g. "feed_forward"): keep traversing the same old module
                    pass
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
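# Example invocation (the script filename and both paths are placeholders,
# shown only to illustrate the CLI):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path /path/to/output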
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Return the indices of the two numbers in the sorted list `nums` that add up
    to `target`, using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # Lock file names longer than the usual 255-character filename limit get truncated.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
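# Minimal sketch of the behaviour under test (illustrative; the lock path is a
# placeholder):
#
#   lock = FileLock("resource.lock")
#   with lock.acquire(timeout=1):
#       ...  # exclusive section; a second FileLock on the same path would block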
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config

logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer, so converted
    checkpoints produce the same spatial dimensions as the original TF model.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
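# Usage sketch for the classification head (the checkpoint name is the public
# one referenced in the docstrings above; the image path is a placeholder):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted_class = logits.argmax(-1).item()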
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
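# Hedged example (added, not part of the original file): a minimal usage
# sketch of the fast NLLB tokenizer above, assuming the standard
# `transformers` AutoTokenizer entry point and the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP. With the default (non-legacy) behaviour,
# set_src_lang_special_tokens prepends the source language code and
# appends </s>.
from transformers import AutoTokenizer

nllb_tok = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
example_ids = nllb_tok("Hello world")["input_ids"]
print(nllb_tok.convert_ids_to_tokens(example_ids))  # ['eng_Latn', ..., '</s>']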
| 306
|
def __lowerCamelCase ( snake_case__ ) -> list:
"""simple docstring"""
def merge(snake_case__ ,snake_case__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
_SCREAMING_SNAKE_CASE = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
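# Hedged example (added): a de-obfuscated sketch of the merge sort above,
# using the variable names its body implies (collection, left, right). The
# inner generator pops the smaller head element until one side is exhausted,
# then drains whatever remains.
def merge_sort_example(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort_example(collection[:mid]), merge_sort_example(collection[mid:]))

assert merge_sort_example([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]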
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = "donut-swin"
__snake_case : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: List[Any] , UpperCAmelCase_: List[str]=224 , UpperCAmelCase_: List[Any]=4 , UpperCAmelCase_: int=3 , UpperCAmelCase_: Any=96 , UpperCAmelCase_: Optional[Any]=[2, 2, 6, 2] , UpperCAmelCase_: Dict=[3, 6, 12, 24] , UpperCAmelCase_: List[Any]=7 , UpperCAmelCase_: List[str]=4.0 , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: List[str]=0.0 , UpperCAmelCase_: Dict=0.0 , UpperCAmelCase_: str=0.1 , UpperCAmelCase_: List[str]="gelu" , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: List[str]=1E-5 , **UpperCAmelCase_: List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embed_dim
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = num_heads
_SCREAMING_SNAKE_CASE = window_size
_SCREAMING_SNAKE_CASE = mlp_ratio
_SCREAMING_SNAKE_CASE = qkv_bias
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = drop_path_rate
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = use_absolute_embeddings
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
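# Hedged example (added): a quick check of the derived hidden_size above.
# With the defaults embed_dim=96 and depths=[2, 2, 6, 2] (four stages), the
# channel dimension after the last stage is 96 * 2 ** 3 = 768.
embed_dim_example, depths_example = 96, [2, 2, 6, 2]
assert int(embed_dim_example * 2 ** (len(depths_example) - 1)) == 768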
| 306
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length, 2) ,snake_case__ )
else:
_SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length) ,snake_case__ )
for i, tensor in enumerate(snake_case__ ):
if padding_side == "right":
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
else:
_SCREAMING_SNAKE_CASE = tensor[:sequence_length]
return out_tensor.tolist()
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : PreTrainedTokenizerBase
__snake_case : Union[bool, str, PaddingStrategy] = True
__snake_case : Optional[int] = None
__snake_case : Optional[int] = None
__snake_case : int = -100
__snake_case : str = "pt"
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
import torch
_SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels"""
_SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_SCREAMING_SNAKE_CASE = self.tokenizer.pad(
UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1]
_SCREAMING_SNAKE_CASE = self.tokenizer.padding_side
if padding_side == "right":
_SCREAMING_SNAKE_CASE = [
list(UpperCAmelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) for label in labels
]
else:
_SCREAMING_SNAKE_CASE = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) + list(UpperCAmelCase_ ) for label in labels
]
_SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , -1 , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features]
_SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , (-1, -1) , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {k: torch.tensor(UpperCAmelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
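# Hedged example (added): a de-obfuscated sketch of the padding helper above
# for the 1-D case, assuming the signature padding_tensor(tensors,
# padding_value, padding_side, sequence_length). Each ragged example is
# truncated to sequence_length and left- or right-padded with padding_value.
import numpy as np

def padding_tensor_example(tensors, padding_value, padding_side, sequence_length):
    out = np.full((len(tensors), sequence_length), padding_value)
    for i, tensor in enumerate(tensors):
        tensor = np.asarray(tensor)[:sequence_length]
        if padding_side == "right":
            out[i, : len(tensor)] = tensor
        else:
            out[i, -len(tensor) :] = tensor
    return out.tolist()

assert padding_tensor_example([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]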
| 306
| 1
|
import random
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = num - 1
_SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
_SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
_SCREAMING_SNAKE_CASE = random.randrange(2 ,num - 1 )
_SCREAMING_SNAKE_CASE = pow(snake_case__ ,snake_case__ ,snake_case__ )
if v != 1:
_SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
if num < 2:
return False
_SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
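# Hedged example (added): a quick sanity check of the primality helpers
# above, using the names the __main__ block implies (rabin_miller,
# is_prime_low_num). 999_983 is the largest prime below one million and
# 2_147_483_647 is the Mersenne prime 2**31 - 1.
assert is_prime_low_num(999_983)
assert not is_prime_low_num(999_983 * 7)   # caught by the trial division over low_primes
assert rabin_miller(2_147_483_647)         # survives all five Miller-Rabin rounds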
| 306
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
# intermediate weights
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
_SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""]
set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
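# Hedged example (added): the set_param helper above applied to a toy linear
# layer, with de-obfuscated argument names (torch_layer, weight, bias). It
# asserts shape agreement, then replaces the layer parameters in place,
# which is exactly what every set_param call in this script relies on.
import torch
from torch import nn

toy_layer = nn.Linear(4, 4)
new_weight, new_bias = torch.randn(4, 4), torch.randn(4)
assert toy_layer.weight.shape == new_weight.shape
toy_layer.weight = nn.Parameter(new_weight)
assert toy_layer.bias.shape == new_bias.shape
toy_layer.bias = nn.Parameter(new_bias)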
| 306
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=8 ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
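# Hedged example (added): the latent-size arithmetic above with
# de-obfuscated names. For the default scale_factor=8, each side is rounded
# up to a multiple of 64 image pixels and divided by 8, so a 768x768 image
# maps to 96x96 latents.
def downscale_example(height, width, scale_factor=8):
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

assert downscale_example(768, 768) == (96, 96)
assert downscale_example(512, 512) == (64, 64)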
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: List[str] , UpperCAmelCase_: UNetaDConditionModel , UpperCAmelCase_: DDPMScheduler , UpperCAmelCase_: VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , movq=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: int ):
'''simple docstring'''
if latents is None:
_SCREAMING_SNAKE_CASE = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_SCREAMING_SNAKE_CASE = latents.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_SCREAMING_SNAKE_CASE = torch.device(F'cuda:{gpu_id}' )
_SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_SCREAMING_SNAKE_CASE = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cpu_offload_with_hook(UpperCAmelCase_ , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ )
# We'll offload the last model manually.
_SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self: Any ):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase_ )
def __call__( self: Union[str, Any] , UpperCAmelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_: torch.FloatTensor , UpperCAmelCase_: int = 512 , UpperCAmelCase_: int = 512 , UpperCAmelCase_: int = 100 , UpperCAmelCase_: float = 4.0 , UpperCAmelCase_: int = 1 , UpperCAmelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_: Optional[torch.FloatTensor] = None , UpperCAmelCase_: Optional[str] = "pil" , UpperCAmelCase_: bool = True , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._execution_device
_SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ , dim=0 )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ , dim=0 )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ , dim=0 )
_SCREAMING_SNAKE_CASE = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
_SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
_SCREAMING_SNAKE_CASE = hint.repeat_interleave(UpperCAmelCase_ , dim=0 )
_SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase_ )
self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.scheduler.timesteps
_SCREAMING_SNAKE_CASE = self.movq.config.latent_channels
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = downscale_height_and_width(UpperCAmelCase_ , UpperCAmelCase_ , self.movq_scale_factor )
# create initial latent
_SCREAMING_SNAKE_CASE = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE = {"""image_embeds""": image_embeds, """hint""": hint}
_SCREAMING_SNAKE_CASE = self.unet(
sample=UpperCAmelCase_ , timestep=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , added_cond_kwargs=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
_SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE = self.scheduler.step(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ , )[0]
# post-processing
_SCREAMING_SNAKE_CASE = self.movq.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
_SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
_SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
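# Hedged example (added): the classifier-free guidance update used inside
# __call__ above, sketched on dummy tensors. The guided prediction
# extrapolates from the unconditional branch toward the text-conditioned one
# by guidance_scale.
import torch

uncond_example, text_example = torch.zeros(1, 4), torch.ones(1, 4)
guidance_scale_example = 4.0
guided = uncond_example + guidance_scale_example * (text_example - uncond_example)
assert torch.allclose(guided, torch.full((1, 4), 4.0))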
| 306
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = TextToVideoSDPipeline
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Optional[int] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict=0 ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """np"""
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=25 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
_SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
_SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
_SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="""pt""" ).frames
_SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
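# Hedged example (added): the corner-slice comparison pattern these tests
# rely on, sketched with plain numpy. Checking a fixed 3x3 slice of the last
# channel against stored values keeps the assertion cheap and deterministic.
import numpy as np

frame = np.zeros((64, 64, 3), dtype=np.float32)
image_slice = frame[-3:, -3:, -1]
expected_slice = np.zeros(9, dtype=np.float32)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2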
| 306
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def __lowerCamelCase ( snake_case__ ,snake_case__ = 16 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_SCREAMING_SNAKE_CASE = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=snake_case__ ,max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE = datasets.map(
snake_case__ ,batched=snake_case__ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE = 8
else:
_SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
snake_case__ ,padding="""longest""" ,max_length=snake_case__ ,pad_to_multiple_of=snake_case__ ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""train"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
_SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=snake_case__ ,collate_fn=snake_case__ ,batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,snake_case__ ) == "1":
_SCREAMING_SNAKE_CASE = 2
# New Code #
_SCREAMING_SNAKE_CASE = int(args.gradient_accumulation_steps )
_SCREAMING_SNAKE_CASE = int(args.local_sgd_steps )
# Initialize accelerator
_SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=snake_case__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE = config["""lr"""]
_SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] )
_SCREAMING_SNAKE_CASE = int(config["""seed"""] )
_SCREAMING_SNAKE_CASE = int(config["""batch_size"""] )
_SCREAMING_SNAKE_CASE = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_dataloaders(snake_case__ ,snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() ,lr=snake_case__ )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=snake_case__ ,num_warmup_steps=1_00 ,num_training_steps=(len(snake_case__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
with LocalSGD(
accelerator=snake_case__ ,model=snake_case__ ,local_sgd_steps=snake_case__ ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
_SCREAMING_SNAKE_CASE = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**snake_case__ )
_SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ ,references=snake_case__ ,)
_SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' ,snake_case__ )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=snake_case__ ,default=snake_case__ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=snake_case__ ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=snake_case__ ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ ,snake_case__ )
if __name__ == "__main__":
main()
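# Hedged example (added): the shape of the LocalSGD training loop above,
# condensed as a sketch (it needs a prepared accelerator/model/dataloader to
# actually run, so it is left commented). local_sgd.step() counts optimizer
# steps and synchronizes parameters across workers every local_sgd_steps
# steps, complementing rather than replacing gradient accumulation.
#
# with LocalSGD(accelerator=accelerator, model=model,
#               local_sgd_steps=8, enabled=True) as local_sgd:
#     for batch in train_dataloader:
#         with accelerator.accumulate(model):
#             loss = model(**batch).loss
#             accelerator.backward(loss)
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()
#         local_sgd.step()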
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCAmelCase :
def __init__( self: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int=13 , UpperCAmelCase_: Optional[int]=7 , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=33 , UpperCAmelCase_: Tuple=32 , UpperCAmelCase_: List[Any]=5 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Any=37 , UpperCAmelCase_: Optional[Any]="gelu" , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: int=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Tuple=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = EsmForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : List[Any] = False
__snake_case : Dict = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = ()
__snake_case : Dict = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = True
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = EsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_SCREAMING_SNAKE_CASE = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_SCREAMING_SNAKE_CASE = create_position_ids_from_input_ids(UpperCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()[0]
_SCREAMING_SNAKE_CASE = EsmEmbeddings(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.empty(2 , 4 , 30 )
_SCREAMING_SNAKE_CASE = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_SCREAMING_SNAKE_CASE = torch.as_tensor([expected_single_positions, expected_single_positions] )
_SCREAMING_SNAKE_CASE = embeddings.create_position_ids_from_inputs_embeds(UpperCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ):
@slow
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = 33
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
with torch.no_grad():
_SCREAMING_SNAKE_CASE = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
    # intermediate weights
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
_SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""]
set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the pickled Trax model checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
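# A hypothetical invocation of this conversion script (the script filename and all
# paths below are placeholders, not real checkpoints):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin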
import random
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = num - 1
_SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
_SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
_SCREAMING_SNAKE_CASE = random.randrange(2 ,num - 1 )
_SCREAMING_SNAKE_CASE = pow(snake_case__ ,snake_case__ ,snake_case__ )
if v != 1:
_SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
if num < 2:
return False
    _SCREAMING_SNAKE_CASE = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
    print('''Prime number:''', num)
    print('''is_prime_low_num:''', is_prime_low_num(num))
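# Hypothetical usage sketch, assuming the helpers above keep the conventional
# names used at their call sites (`is_prime_low_num`, `generate_large_prime`):
#   is_prime_low_num(97)    -> True   (found in the low-primes table)
#   is_prime_low_num(1013)  -> True   (passes trial division, then Rabin-Miller)
#   generate_large_prime(16)          -> a random 16-bit probable prime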
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(snake_case__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_SCREAMING_SNAKE_CASE = i + 1
else:
_SCREAMING_SNAKE_CASE = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
if not isinstance(snake_case__ ,snake_case__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_SCREAMING_SNAKE_CASE = 0
while number:
        # `number &= number - 1` clears the lowest set bit (Brian Kernighan's
        # trick), so the loop runs once per set bit rather than once per bit
        # position of the word.
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> float:
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : str = StableUnCLIPImgaImgPipeline
__snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__snake_case : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__snake_case : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case : List[str] = frozenset([] )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = embedder_hidden_size
# image encoding components
_SCREAMING_SNAKE_CASE = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL()
_SCREAMING_SNAKE_CASE = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: str=0 , UpperCAmelCase_: Dict=True ):
'''simple docstring'''
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if pil_image:
_SCREAMING_SNAKE_CASE = input_image * 0.5 + 0.5
_SCREAMING_SNAKE_CASE = input_image.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_SCREAMING_SNAKE_CASE = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase_ )
inputs.update({"""image_embeds""": None} )
_SCREAMING_SNAKE_CASE = sd_pipe(**UpperCAmelCase_ ).images
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
_SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , """anime turle""" , generator=UpperCAmelCase_ , output_type="""np""" )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
_SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(UpperCAmelCase_ , """anime turle""" , generator=UpperCAmelCase_ , output_type="""np""" )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE = pipe(
UpperCAmelCase_ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
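# Hypothetical usage sketch (requires the Hugging Face Hub or a local checkpoint;
# the `text_target` keyword assumes a recent transformers release):
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")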
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y_points[i]
for i in range(2 ,snake_case__ ):
for j in range(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
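# Hypothetical check, assuming the helper above keeps its conventional name
# `neville_interpolate(x_points, y_points, x0)`:
#   neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)[0] -> 25.0
# (the four points lie on f(x) = x**2, so the interpolant reproduces 5**2 exactly)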
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , **UpperCAmelCase_: Tuple ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any , **UpperCAmelCase_: str ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , **UpperCAmelCase_: str ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
_SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = image_processor(UpperCAmelCase_ , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE = processor(images=UpperCAmelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """lower newer"""
_SCREAMING_SNAKE_CASE = processor(text=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """lower newer"""
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_image_processor()
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """lower newer"""
_SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections.abc import Sequence
def __lowerCamelCase ( snake_case__ = None ) -> int:
"""simple docstring"""
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
_SCREAMING_SNAKE_CASE = nums[0]
for i in range(1 ,len(snake_case__ ) ):
_SCREAMING_SNAKE_CASE = nums[i]
_SCREAMING_SNAKE_CASE = max(snake_case__ ,ans + num ,snake_case__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
UpperCamelCase = int(input('''Enter number of elements : ''').strip())
UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int
__snake_case : int
__snake_case : float
__snake_case : float
__snake_case : Tuple[int]
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(snake_case__ ), np.cos(snake_case__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(snake_case__ ), -np.sin(snake_case__ ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(snake_case__ ,snake_case__ )
origins.append(snake_case__ )
xs.append(snake_case__ )
ys.append(snake_case__ )
zs.append(snake_case__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,width=snake_case__ ,height=snake_case__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(snake_case__ )) ,)
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: str , UpperCAmelCase_: pyspark.sql.DataFrame , UpperCAmelCase_: Optional[NamedSplit] = None , UpperCAmelCase_: Optional[Features] = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: str = None , UpperCAmelCase_: bool = False , UpperCAmelCase_: str = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: str = "arrow" , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = load_from_cache_file
_SCREAMING_SNAKE_CASE = file_format
_SCREAMING_SNAKE_CASE = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_SCREAMING_SNAKE_CASE = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
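# Hypothetical usage sketch (in the `datasets` library this reader is named
# `SparkDatasetReader`; assumes an active SparkSession bound to `spark`):
#   df = spark.range(100).withColumnRenamed("id", "value")
#   ds = SparkDatasetReader(df).read()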
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
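# Quick inference sketch mirroring the integration test above (hedged: assumes
# jax/flax are installed and the Hub checkpoint is reachable; the token ids are
# the bert-base-uncased encoding of "hello world"):
import numpy as np

from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[101, 7592, 2088, 102]])  # [CLS] hello world [SEP]
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # (1, 4, 768)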
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
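# De-obfuscated sketch of the helper above (hedged: it computes the
# Newton-Laplace speed of sound, v = sqrt(bulk_modulus / density)):
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5

# Water at ~20 °C: density ~998 kg/m^3, bulk modulus ~2.15 GPa -> ~1468 m/s
print(speed_of_sound_in_a_fluid(998, 2.15e9))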
import argparse
import hashlib  # hashlib is only used in the self-test below
import struct
class __UpperCAmelCase :
def __init__( self: List[str] , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: int , UpperCAmelCase_: List[str] ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64)
_SCREAMING_SNAKE_CASE = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" , UpperCAmelCase_ ) ) + [0] * 64
for i in range(16 , 80 ):
_SCREAMING_SNAKE_CASE = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.padding()
_SCREAMING_SNAKE_CASE = self.split_blocks()
for block in self.blocks:
_SCREAMING_SNAKE_CASE = self.expand_block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
_SCREAMING_SNAKE_CASE = (b & c) | ((~b) & d)
_SCREAMING_SNAKE_CASE = 0x5a_827_999
elif 20 <= i < 40:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0x6e_d9e_ba1
elif 40 <= i < 60:
_SCREAMING_SNAKE_CASE = (b & c) | (b & d) | (c & d)
_SCREAMING_SNAKE_CASE = 0x8f_1bb_cdc
elif 60 <= i < 80:
_SCREAMING_SNAKE_CASE = b ^ c ^ d
_SCREAMING_SNAKE_CASE = 0xca_62c_1d6
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (
self.rotate(UpperCAmelCase_ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(UpperCAmelCase_ , 30 ),
c,
d,
)
_SCREAMING_SNAKE_CASE = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = b"""Test String"""
    assert SHAaHash(snake_case__ ).final_hash() == hashlib.sha1(snake_case__ ).hexdigest()  # noqa: S324
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
else:
_SCREAMING_SNAKE_CASE = bytes(snake_case__ ,"""utf-8""" )
print(SHAaHash(snake_case__ ).final_hash() )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
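# Sanity-check sketch against the standard library, using the classic SHA-1
# test vector (hedged: `SHAaHash(msg).final_hash()` above should match it):
import hashlib

msg = b"The quick brown fox jumps over the lazy dog"
assert hashlib.sha1(msg).hexdigest() == "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"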
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
UpperCamelCase = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class __UpperCAmelCase :
__snake_case : str
__snake_case : Optional[str] = None
__snake_case : Optional[Union[str, int]] = None
__snake_case : Optional[Union[str, int]] = None
__snake_case : Optional[Union[str, int]] = None
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = _str_to_version_tuple(self.version_str )
def __repr__( self: int ):
'''simple docstring'''
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return self.major, self.minor, self.patch
def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
        if isinstance(UpperCAmelCase_ , str ):
return Version(UpperCAmelCase_ )
        elif isinstance(UpperCAmelCase_ , Version ):
return other
raise TypeError(F'{other} (type {type(UpperCAmelCase_ )}) cannot be compared to version.' )
def __eq__( self: Optional[int] , UpperCAmelCase_: Any ):
'''simple docstring'''
try:
_SCREAMING_SNAKE_CASE = self._validate_operand(UpperCAmelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self: Optional[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._validate_operand(UpperCAmelCase_ )
return self.tuple < other.tuple
def __hash__( self: Optional[int] ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCamelCase ( cls: str , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return self.version_str
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _VERSION_REG.match(snake_case__ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(snake_case__ ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def __lowerCamelCase ( snake_case__ ) -> Optional[Any]:
"""simple docstring"""
return ".".join(str(snake_case__ ) for v in version_tuple )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[Any] = ["input_ids", "attention_mask"]
__snake_case : Optional[int] = None
def __init__( self: Dict , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: str=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int="<unk>" , UpperCAmelCase_: List[str]="<s>" , UpperCAmelCase_: Tuple="</s>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Dict=False , **UpperCAmelCase_: Dict , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCAmelCase_ ) != add_prefix_space:
_SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase_ , pre_tok_state.pop("""type""" ) )
_SCREAMING_SNAKE_CASE = add_prefix_space
_SCREAMING_SNAKE_CASE = pre_tok_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = add_prefix_space
def UpperCamelCase ( self: List[str] , *UpperCAmelCase_: Any , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , *UpperCAmelCase_: Dict , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , UpperCAmelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
""" pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: "Conversation" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) + [self.eos_token_id] )
if len(UpperCAmelCase_ ) > self.model_max_length:
_SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
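# Usage sketch (hedged: the class above is `BloomTokenizerFast` in
# `transformers`; assumes Hub access):
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
encoding = tokenizer("Hello world")
print(encoding["input_ids"])
print(tokenizer.decode(encoding["input_ids"]))  # Hello world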
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> bool:
"""simple docstring"""
    _SCREAMING_SNAKE_CASE = get_failure_array(snake_case__ )  # 1) Construct the failure array for the pattern
    # 2) Step through text searching for pattern
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0, 0 # index into text, pattern
while i < len(snake_case__ ):
if pattern[j] == text[i]:
if j == (len(snake_case__ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_SCREAMING_SNAKE_CASE = failure[j - 1]
continue
i += 1
return False
def __lowerCamelCase ( snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [0]
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 1
while j < len(snake_case__ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_SCREAMING_SNAKE_CASE = failure[i - 1]
continue
j += 1
failure.append(snake_case__ )
return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase = '''abc1abc12'''
UpperCamelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
UpperCamelCase = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase = '''ABABX'''
UpperCamelCase = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
UpperCamelCase = '''AAAB'''
UpperCamelCase = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
UpperCamelCase = '''abcdabcy'''
UpperCamelCase = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
UpperCamelCase = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
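# Readable sketch of the failure-array construction above (hedged: names are
# de-obfuscated; failure[j] is the length of the longest proper prefix of
# pattern[:j+1] that is also its suffix):
def failure_array(pattern: str) -> list[int]:
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure

assert failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]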
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
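# Inference sketch mirroring the integration test above (hedged: the public
# class name in `transformers` is TFDebertaV2Model, and deberta-v2-xlarge has
# hidden size 1536; assumes Hub access):
import tensorflow as tf

from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31_414, 232, 2]])
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (1, 4, 1536)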
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : jnp.ndarray
__snake_case : jnp.ndarray
class __UpperCAmelCase (nn.Module ):
__snake_case : int
__snake_case : Tuple[int] = (16, 32, 96, 256)
__snake_case : jnp.dtype = jnp.floataa
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels ) - 1 ):
_SCREAMING_SNAKE_CASE = self.block_out_channels[i]
_SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = blocks
_SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self: Tuple , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.conv_in(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.silu(UpperCAmelCase_ )
for block in self.blocks:
_SCREAMING_SNAKE_CASE = block(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.silu(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.conv_out(UpperCAmelCase_ )
return embedding
@flax_register_to_config
class __UpperCAmelCase (nn.Module ,_UpperCAmelCase ,_UpperCAmelCase ):
__snake_case : int = 32
__snake_case : int = 4
__snake_case : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__snake_case : Union[bool, Tuple[bool]] = False
__snake_case : Tuple[int] = (320, 640, 1280, 1280)
__snake_case : int = 2
__snake_case : Union[int, Tuple[int]] = 8
__snake_case : Optional[Union[int, Tuple[int]]] = None
__snake_case : int = 1280
__snake_case : float = 0.0
__snake_case : bool = False
__snake_case : jnp.dtype = jnp.floataa
__snake_case : bool = True
__snake_case : int = 0
__snake_case : str = "rgb"
__snake_case : Tuple[int] = (16, 32, 96, 256)
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: jax.random.KeyArray ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
_SCREAMING_SNAKE_CASE = jnp.zeros(UpperCAmelCase_ , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
_SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
_SCREAMING_SNAKE_CASE = jnp.zeros(UpperCAmelCase_ , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = jax.random.split(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["params"]
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.block_out_channels
_SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
_SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(UpperCAmelCase_ , dtype=self.dtype )
_SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types )
# down
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = block_out_channels[0]
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase_ )
for i, down_block_type in enumerate(self.down_block_types ):
_SCREAMING_SNAKE_CASE = output_channel
_SCREAMING_SNAKE_CASE = block_out_channels[i]
_SCREAMING_SNAKE_CASE = i == len(UpperCAmelCase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase_ )
for _ in range(self.layers_per_block ):
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase_ )
if not is_final_block:
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = down_blocks
_SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
_SCREAMING_SNAKE_CASE = block_out_channels[-1]
_SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCAmelCase_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_SCREAMING_SNAKE_CASE = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Dict , UpperCAmelCase_: float = 1.0 , UpperCAmelCase_: bool = True , UpperCAmelCase_: bool = False , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_SCREAMING_SNAKE_CASE = jnp.flip(UpperCAmelCase_ , axis=1 )
# 1. time
if not isinstance(UpperCAmelCase_ , jnp.ndarray ):
_SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE = jnp.expand_dims(UpperCAmelCase_ , 0 )
_SCREAMING_SNAKE_CASE = self.time_proj(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.time_embedding(UpperCAmelCase_ )
# 2. pre-process
_SCREAMING_SNAKE_CASE = jnp.transpose(UpperCAmelCase_ , (0, 2, 3, 1) )
_SCREAMING_SNAKE_CASE = self.conv_in(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = jnp.transpose(UpperCAmelCase_ , (0, 2, 3, 1) )
_SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(UpperCAmelCase_ )
sample += controlnet_cond
# 3. down
_SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = down_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = down_block(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_SCREAMING_SNAKE_CASE = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
        # 5. controlnet blocks
_SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(UpperCAmelCase_ , self.controlnet_down_blocks ):
_SCREAMING_SNAKE_CASE = controlnet_block(UpperCAmelCase_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
_SCREAMING_SNAKE_CASE = self.controlnet_mid_block(UpperCAmelCase_ )
# 6. scaling
_SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCAmelCase_ , mid_block_res_sample=UpperCAmelCase_ )
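# Loading sketch (hedged: via `diffusers.FlaxControlNetModel`, which wraps this
# module; its `from_pretrained` returns the model together with its params):
import jax.numpy as jnp

from diffusers import FlaxControlNetModel

controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
)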
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = gather(snake_case__ )
assert gathered_tensor.tolist() == list(range(1 ,state.num_processes**2 + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [state.process_index]
_SCREAMING_SNAKE_CASE = gather_object(snake_case__ )
assert len(snake_case__ ) == state.num_processes, F'{gathered_obj}, {len(snake_case__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = broadcast(snake_case__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 ,state.num_processes + 1 ) )
def __lowerCamelCase ( snake_case__ ) -> Tuple:
"""simple docstring"""
if state.is_main_process:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_SCREAMING_SNAKE_CASE = torch.arange(state.num_processes ).to(state.device )
_SCREAMING_SNAKE_CASE = pad_across_processes(snake_case__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 ,state.num_processes ) ) + [0]
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""sum""" )
_SCREAMING_SNAKE_CASE = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> List[Any]:
"""simple docstring"""
if state.num_processes != 2:
return
_SCREAMING_SNAKE_CASE = create_tensor(snake_case__ )
_SCREAMING_SNAKE_CASE = reduce(snake_case__ ,"""mean""" )
_SCREAMING_SNAKE_CASE = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(snake_case__ ,snake_case__ ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( snake_case__ ) -> str:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(snake_case__ )
state.print("""testing gather_object""" )
test_gather_object(snake_case__ )
state.print("""testing broadcast""" )
test_broadcast(snake_case__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(snake_case__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(snake_case__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(snake_case__ )
if __name__ == "__main__":
main()
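# Launch sketch (hedged: the file name is a placeholder):
#   accelerate launch --num_processes 2 test_operations.py
#
# With two processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather -> [1, 2, 3, 4], reduce "sum" -> [4., 6.] and
# reduce "mean" -> [2., 3.], matching the assertions above.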
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__="attention" ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_SCREAMING_SNAKE_CASE = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
_SCREAMING_SNAKE_CASE = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=False ) -> Optional[Any]:
"""simple docstring"""
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = (wi_a, wi_a)
else:
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
_SCREAMING_SNAKE_CASE = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def __lowerCamelCase ( snake_case__ ,*, snake_case__ ,snake_case__ ,snake_case__ = False ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables["""target"""] )
_SCREAMING_SNAKE_CASE = {"""/""".join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_SCREAMING_SNAKE_CASE = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = collections.OrderedDict()
# Shared embeddings.
_SCREAMING_SNAKE_CASE = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""pre_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,"""pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(snake_case__ ,snake_case__ ,"""encoder""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,snake_case__ ,"""encoder""" ).T
_SCREAMING_SNAKE_CASE = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,0 ,"""encoder""" ).T
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(
snake_case__ ,0 ,"""decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_self_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""self_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 1 (Cross Attention).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_cross_attention_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_attention_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""encoder_decoder_attention""" )
_SCREAMING_SNAKE_CASE = layer_norm
_SCREAMING_SNAKE_CASE = k.T
_SCREAMING_SNAKE_CASE = o.T
_SCREAMING_SNAKE_CASE = q.T
_SCREAMING_SNAKE_CASE = v.T
# Block i, layer 2 (MLP).
_SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,"""pre_mlp_layer_norm""" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = tax_mlp_lookup(snake_case__ ,snake_case__ ,"""decoder""" ,snake_case__ )
_SCREAMING_SNAKE_CASE = layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE = wi[0].T
_SCREAMING_SNAKE_CASE = wi[1].T
else:
_SCREAMING_SNAKE_CASE = wi.T
_SCREAMING_SNAKE_CASE = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_SCREAMING_SNAKE_CASE = tax_relpos_bias_lookup(snake_case__ ,snake_case__ ,"""decoder""" ).T
_SCREAMING_SNAKE_CASE = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_SCREAMING_SNAKE_CASE = old["""decoder/logits_dense/kernel"""].T
return new
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_SCREAMING_SNAKE_CASE = state_dict["""shared.weight"""]
return state_dict
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(snake_case__ )
_SCREAMING_SNAKE_CASE = convert_tax_to_pytorch(
snake_case__ ,num_layers=config.num_layers ,is_encoder_only=snake_case__ ,scalable_attention=snake_case__ )
_SCREAMING_SNAKE_CASE = make_state_dict(snake_case__ ,snake_case__ )
model.load_state_dict(snake_case__ ,strict=snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = False ,snake_case__ = False ,) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MTaConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_SCREAMING_SNAKE_CASE = UMTaEncoderModel(snake_case__ )
else:
_SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print("""Done""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
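# Invocation sketch (hedged: the script name and paths are placeholders):
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./converted-model \
#       --scalable_attention   # pass only for UMT5-style scaled attention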
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __lowerCamelCase ( ) -> tuple[list[int], int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [randint(-10_00 ,10_00 ) for i in range(10 )]
_SCREAMING_SNAKE_CASE = randint(-50_00 ,50_00 )
return (arr, r)
UpperCamelCase = make_dataset()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(snake_case__ ,3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_SCREAMING_SNAKE_CASE = len(snake_case__ )
for i in range(n - 1 ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __lowerCamelCase ( ) -> tuple[float, float]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum1(*dataset)
"""
_SCREAMING_SNAKE_CASE = """
triplet_sum2(*dataset)
"""
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
_SCREAMING_SNAKE_CASE = repeat(setup=snake_case__ ,stmt=snake_case__ ,repeat=5 ,number=1_00_00 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCamelCase = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")