| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'''
_DESCRIPTION = '''\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system, with a CER of 0 being a perfect score.\n'''
_KWARGS_DESCRIPTION = '''\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
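
# Note: jiwer's "wer" key is read out as the CER here. Because cer_transform
# reduces both texts to character sequences first, the word-level error rate
# computed on those sequences is exactly the character error rate.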
| 188 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''llama'''
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''past_key_values''']
def __init__( self ,SCREAMING_SNAKE_CASE__=3_20_00 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=1_10_08 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__="silu" ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-6 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ,) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE :int = max_position_embeddings
__SCREAMING_SNAKE_CASE :List[str] = hidden_size
__SCREAMING_SNAKE_CASE :Tuple = intermediate_size
__SCREAMING_SNAKE_CASE :List[str] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__SCREAMING_SNAKE_CASE :Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE :str = num_key_value_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE :List[str] = initializer_range
__SCREAMING_SNAKE_CASE :Union[str, Any] = rms_norm_eps
__SCREAMING_SNAKE_CASE :Dict = pretraining_tp
__SCREAMING_SNAKE_CASE :Optional[Any] = use_cache
__SCREAMING_SNAKE_CASE :Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,tie_word_embeddings=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f'''got {self.rope_scaling}''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = self.rope_scaling.get('''type''' ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = self.rope_scaling.get('''factor''' ,SCREAMING_SNAKE_CASE__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' ) | 191 | 0 |
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(number: int) -> int:
    """Sum the factorials of the decimal digits of `number`."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # no larger number can equal its digit-factorial sum
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
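
# Worked example: 145 = 1! + 4! + 5! = 1 + 24 + 120, so 145 is counted; the only
# other such number is 40585, giving solution() == 145 + 40585 == 40730. The bound
# 7 * 9! + 1 works because an 8-digit number can never exceed 8 * 9! = 2903040.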
| 359 |
from math import ceil


def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that a device_map assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Evenly split `n_layers` layer indices into contiguous blocks, one block per device."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
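
# Example: get_device_map(4, [0, 1]) returns {0: [0, 1], 1: [2, 3]} -- each device
# gets a contiguous block of ceil(n_layers / len(devices)) layer indices.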
| 260 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
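
# Registering the _LazyModule in sys.modules means imports such as SEWModel are
# resolved lazily, so torch is only loaded when a model class is first accessed.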
| 71 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase_ = None
lowerCamelCase_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX",
                              tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO",
                              **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
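
# Usage sketch: MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro",
# src_lang="en_XX", tgt_lang="ro_RO") encodes inputs as `tokens </s> en_XX`, i.e.
# the language code is appended as a suffix token by set_src_lang_special_tokens.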
| 268 | 0 |
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test using 5 random bases."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
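
# A single random base exposes a composite with probability >= 3/4, so five
# rounds leave at most a 4**-5 (~0.1%) chance of accepting a composite number.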

def is_prime_low_num(num: int) -> bool:
    """Quick primality check: trial division by all primes below 1000, then Miller-Rabin."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime number that is `keysize` bits in size."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 118 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 118 | 1 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>",
                 eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 72 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1,
                 groups: int = 1, activation: Optional[str] = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """A 1x1 convolution used to project features to the right size and (optionally) downsample."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: channel-wise attention from globally pooled features."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck layer with a grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
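
# Usage sketch (hypothetical shapes): for config = RegNetConfig(num_labels=10),
#   model = RegNetForImageClassification(config)
#   out = model(pixel_values=torch.randn(1, 3, 224, 224), labels=torch.tensor([0]))
# yields out.logits of shape (1, 10) and a single-label cross-entropy out.loss.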
| 196 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # Assumption: the obfuscated source only preserves a boolean False flag here;
    # `test_cpu_offload` is a plausible reconstruction, not confirmed by the source.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 357 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
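
# Usage sketch: ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# loads tokenizer.json and keeps the lowercasing normalizer in sync with the
# do_lower_case=True default from PRETRAINED_INIT_CONFIGURATION.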
| 82 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 101 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self):
lowercase = 1
lowercase = 3
lowercase = (3_2, 3_2)
lowercase = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0)).to(A__)
return image
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=3_2 ,)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = RobertaSeriesConfig(
hidden_size=3_2 ,project_dim=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_0_0_6 ,)
return RobertaSeriesModelWithTransformation(A__)
@property
def A__ ( self):
def extract(*A__ ,**A__):
class lowercase :
def __init__( self):
lowercase = torch.ones([0])
def A__ ( self ,A__):
self.pixel_values.to(A__)
return self
return Out()
return extract
def A__ ( self):
lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=A__)
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
lowercase = 7_7
lowercase = self.dummy_image.to(A__)
lowercase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=A__ ,scheduler=A__ ,vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,safety_checker=A__ ,feature_extractor=self.dummy_extractor ,)
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=A__)
lowercase = alt_pipe.to(A__)
alt_pipe.set_progress_bar_config(disable=A__)
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,)
lowercase = output.images
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,return_dict=A__ ,)[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def A__ ( self):
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=A__)
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
lowercase = 7_7
lowercase = self.dummy_image.to(A__)
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=A__ ,scheduler=A__ ,vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,safety_checker=A__ ,feature_extractor=self.dummy_extractor ,)
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=A__)
lowercase = alt_pipe.to(A__)
alt_pipe.set_progress_bar_config(disable=A__)
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def A__ ( self):
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase = init_image.resize((7_6_0, 5_0_4))
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
A__ ,safety_checker=A__ ,)
pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0)
lowercase = pipe(
prompt=A__ ,image=A__ ,strength=0.75 ,guidance_scale=7.5 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
lowercase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowercase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
lowercase = init_image.resize((7_6_8, 5_1_2))
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
A__ ,safety_checker=A__ ,)
pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0)
lowercase = pipe(
prompt=A__ ,image=A__ ,strength=0.75 ,guidance_scale=7.5 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
| 101 | 1 |
from math import factorial
lowercase : int = {str(d): factorial(d) for d in range(10)}
def UpperCAmelCase_ (_lowerCAmelCase : Any ) -> Optional[Any]:
return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCamelCase ) )
def UpperCAmelCase_ () -> Union[str, Any]:
__UpperCamelCase : Tuple = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __lowerCamelCase ) if sum_of_digit_factorial(__lowerCamelCase ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""") | 351 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , __UpperCamelCase , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase ) | 171 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__lowerCamelCase : Any = "\\n Text data.\n Second line of data."
__lowerCamelCase : str = "file"
@pytest.fixture(scope="session" )
def A_ ( _lowerCAmelCase ) -> Any:
UpperCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
UpperCamelCase : Optional[Any] = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
with zstd.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def A_ ( _lowerCAmelCase ) -> Dict:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
UpperCamelCase : List[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
UpperCamelCase : Tuple = input_paths[compression_format]
UpperCamelCase : Tuple = tmp_path / "cache"
UpperCamelCase : Any = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
UpperCamelCase : int = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
UpperCamelCase : Optional[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
UpperCamelCase : Union[str, Any] = "custom_cache"
UpperCamelCase : List[Any] = "custom_extracted_dir"
UpperCamelCase : Any = tmp_path / "custom_extracted_path"
if default_extracted:
UpperCamelCase : str = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Dict = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCamelCase : Dict = xz_file
UpperCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
UpperCamelCase : Dict = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def A_ ( _lowerCAmelCase ) -> str:
UpperCamelCase : Union[str, Any] = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
UpperCamelCase : Union[str, Any] = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def A_ ( _lowerCAmelCase ) -> List[str]:
UpperCamelCase : Union[str, Any] = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
UpperCamelCase : Optional[Any] = "./__missing_file__.txt"
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def A_ ( _lowerCAmelCase ) -> Optional[int]:
UpperCamelCase : Optional[int] = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
UpperCamelCase : List[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def A_ ( ) -> Any:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("https://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("ftp://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def A_ ( _lowerCAmelCase ) -> Any:
UpperCamelCase : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("s3://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("s3://huggingface.co" )
| 52 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase ( _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = args.pruning_method
_UpperCAmelCase = args.threshold
_UpperCAmelCase = args.model_name_or_path.rstrip('''/''' )
_UpperCAmelCase = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
_UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
_UpperCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
_UpperCAmelCase = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
_UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_UpperCAmelCase = name[:-6]
_UpperCAmelCase = model[f'{prefix_}mask_scores']
_UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1
_UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = s * (r - l) + l
_UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
_UpperCAmelCase = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
_UpperCAmelCase = os.path.join(
os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f'\nCreated folder {target_model_path}' )
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
__A : Optional[int] = parser.parse_args()
main(args)
| 260 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
UpperCamelCase : Optional[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
UpperCamelCase : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _lowercase ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any ) -> Optional[Any]:
_a : Optional[Any] = AudioClassificationPipeline(model=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
# test with a raw waveform
_a : Tuple = np.zeros((34000,) )
_a : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def _lowercase ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> int:
_a , _a : Union[str, Any] = examples
_a : Optional[Any] = audio_classifier(UpperCAmelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
UpperCAmelCase__ , [
{"""score""": ANY(UpperCAmelCase__ ), """label""": ANY(UpperCAmelCase__ )},
{"""score""": ANY(UpperCAmelCase__ ), """label""": ANY(UpperCAmelCase__ )},
] , )
_a : List[str] = audio_classifier(UpperCAmelCase__ , top_k=1 )
self.assertEqual(
UpperCAmelCase__ , [
{"""score""": ANY(UpperCAmelCase__ ), """label""": ANY(UpperCAmelCase__ )},
] , )
self.run_torchaudio(UpperCAmelCase__ )
@require_torchaudio
def _lowercase ( self : str , UpperCAmelCase__ : List[str] ) -> List[Any]:
import datasets
# test with a local file
_a : Any = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_a : int = dataset[0]["""audio"""]["""array"""]
_a : List[Any] = audio_classifier(UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
{"""score""": ANY(UpperCAmelCase__ ), """label""": ANY(UpperCAmelCase__ )},
{"""score""": ANY(UpperCAmelCase__ ), """label""": ANY(UpperCAmelCase__ )},
] , )
@require_torch
def _lowercase ( self : Tuple ) -> Any:
_a : str = """anton-l/wav2vec2-random-tiny-classifier"""
_a : str = pipeline("""audio-classification""" , model=UpperCAmelCase__ )
_a : Optional[int] = np.ones((8000,) )
_a : Any = audio_classifier(UpperCAmelCase__ , top_k=4 )
_a : int = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_a : List[Any] = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_a : Tuple = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_a : Tuple = audio_classifier(UpperCAmelCase__ , top_k=4 )
self.assertIn(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _lowercase ( self : str ) -> List[Any]:
import datasets
_a : List[Any] = """superb/wav2vec2-base-superb-ks"""
_a : List[str] = pipeline("""audio-classification""" , model=UpperCAmelCase__ )
_a : Optional[Any] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_a : List[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_a : List[str] = audio_classifier(UpperCAmelCase__ , top_k=4 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def _lowercase ( self : Tuple ) -> Union[str, Any]:
pass
| 324 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , snake_case_ ):
def _lowercase ( self : int ) -> int:
_a : Optional[Any] = load_tool("""text-to-speech""" )
self.tool.setup()
def _lowercase ( self : List[str] ) -> Union[str, Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_a : str = self.tool("""hey""" )
_a : List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_a : int = self.tool("""hey""" )
_a : str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 324 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def a__ ( __UpperCamelCase = "isbn/0140328726" ):
SCREAMING_SNAKE_CASE_ = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
SCREAMING_SNAKE_CASE_ = F'''{olid} is not a valid Open Library olid'''
raise ValueError(__UpperCamelCase )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
SCREAMING_SNAKE_CASE_ = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
SCREAMING_SNAKE_CASE_ = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
SCREAMING_SNAKE_CASE_ = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = ", ".join(__UpperCamelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
A : Optional[int] = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(f"\nSearching Open Library for ISBN: {isbn}...\n")
try:
A : Optional[Any] = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"Sorry, there are no results for ISBN: {isbn}.")
| 118 | import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''char'''
lowerCamelCase__ = '''bpe'''
lowerCamelCase__ = '''wp'''
A : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''char_tokenizer''']
lowerCamelCase__ = '''ViTImageProcessor'''
lowerCamelCase__ = '''MgpstrTokenizer'''
def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , **__magic_name__ : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
SCREAMING_SNAKE_CASE_ = tokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("gpt2" )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : Dict , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Dict=None , **__magic_name__ : Tuple ) -> int:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.char_tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ = encodings["input_ids"]
return inputs
def __A ( self : Tuple , __magic_name__ : int ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = sequences
SCREAMING_SNAKE_CASE_ = char_preds.size(0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "char" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "bpe" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._decode_helper(__magic_name__ , "wp" )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE_ = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE_ = scores.index(max(__magic_name__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = final_strs
SCREAMING_SNAKE_CASE_ = final_scores
SCREAMING_SNAKE_CASE_ = char_strs
SCREAMING_SNAKE_CASE_ = bpe_strs
SCREAMING_SNAKE_CASE_ = wp_strs
return out
def __A ( self : int , __magic_name__ : List[Any] , __magic_name__ : str ) -> Any:
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE_ = self.char_decode
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = "[s]"
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE_ = self.bpe_decode
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = "#"
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE_ = self.wp_decode
SCREAMING_SNAKE_CASE_ = 102
SCREAMING_SNAKE_CASE_ = "[SEP]"
else:
raise ValueError(F'''Format {format} is not supported.''' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], []
SCREAMING_SNAKE_CASE_ = pred_logits.size(0 )
SCREAMING_SNAKE_CASE_ = pred_logits.size(1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pred_logits.topk(1 , dim=-1 , largest=__magic_name__ , sorted=__magic_name__ )
SCREAMING_SNAKE_CASE_ = preds_index.view(-1 , __magic_name__ )[:, 1:]
SCREAMING_SNAKE_CASE_ = decoder(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.nn.functional.softmax(__magic_name__ , dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE_ = preds_max_prob[:, 1:]
for index in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = preds_str[index].find(__magic_name__ )
SCREAMING_SNAKE_CASE_ = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE_ = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE_ = pred_index.index(__magic_name__ ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE_ = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__magic_name__ )
conf_scores.append(__magic_name__ )
return dec_strs, conf_scores
def __A ( self : Any , __magic_name__ : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
def __A ( self : Any , __magic_name__ : Union[str, Any] ) -> Tuple:
return self.bpe_tokenizer.batch_decode(__magic_name__ )
def __A ( self : str , __magic_name__ : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
| 118 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = """RegNetConfig"""
# Base docstring
lowercase_ = """facebook/regnet-y-040"""
lowercase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowercase_ = """facebook/regnet-y-040"""
lowercase_ = """tabby, tabby cat"""
lowercase_ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : int , a : int , a : int , a : int = 3 , a : int = 1 , a : int = 1 , a : Optional[str] = "relu" , )-> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ = nn.Convad(
a , a , kernel_size=a , stride=a , padding=kernel_size // 2 , groups=a , bias=a , )
lowercase__ = nn.BatchNormad(a )
lowercase__ = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[Any] )-> Any:
"""simple docstring"""
lowercase__ = self.convolution(a )
lowercase__ = self.normalization(a )
lowercase__ = self.activation(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : List[Any] , a : RegNetConfig )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase__ = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Tuple )-> int:
"""simple docstring"""
lowercase__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
lowercase__ = self.embedder(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : Dict , a : int , a : int , a : int = 2 )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = nn.Convad(a , a , kernel_size=1 , stride=a , bias=a )
lowercase__ = nn.BatchNormad(a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tensor )-> Tensor:
"""simple docstring"""
lowercase__ = self.convolution(a )
lowercase__ = self.normalization(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : Optional[int] , a : int , a : int )-> int:
"""simple docstring"""
super().__init__()
lowercase__ = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ = nn.Sequential(
nn.Convad(a , a , kernel_size=1 ) , nn.ReLU() , nn.Convad(a , a , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> str:
"""simple docstring"""
lowercase__ = self.pooler(a )
lowercase__ = self.attention(a )
lowercase__ = hidden_state * attention
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : Dict , a : RegNetConfig , a : int , a : int , a : int = 1 )-> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1 , out_channels // config.groups_width )
lowercase__ = (
RegNetShortCut(a , a , stride=a ) if should_apply_shortcut else nn.Identity()
)
lowercase__ = nn.Sequential(
RegNetConvLayer(a , a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a , a , stride=a , groups=a , activation=config.hidden_act ) , RegNetConvLayer(a , a , kernel_size=1 , activation=a ) , )
lowercase__ = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = hidden_state
lowercase__ = self.layer(a )
lowercase__ = self.shortcut(a )
hidden_state += residual
lowercase__ = self.activation(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : Optional[int] , a : RegNetConfig , a : int , a : int , a : int = 1 )-> str:
"""simple docstring"""
super().__init__()
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1 , out_channels // config.groups_width )
lowercase__ = (
RegNetShortCut(a , a , stride=a ) if should_apply_shortcut else nn.Identity()
)
lowercase__ = nn.Sequential(
RegNetConvLayer(a , a , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(a , a , stride=a , groups=a , activation=config.hidden_act ) , RegNetSELayer(a , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(a , a , kernel_size=1 , activation=a ) , )
lowercase__ = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = hidden_state
lowercase__ = self.layer(a )
lowercase__ = self.shortcut(a )
hidden_state += residual
lowercase__ = self.activation(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : Any , a : RegNetConfig , a : int , a : int , a : int = 2 , a : int = 2 , )-> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
lowercase__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
a , a , a , stride=a , ) , *[layer(a , a , a ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.layers(a )
return hidden_state
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : List[Any] , a : RegNetConfig )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a , config.depths[1:] ):
self.stages.append(RegNetStage(a , a , a , depth=a ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Tensor , a : bool = False , a : bool = True )-> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
lowercase__ = stage_module(a )
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a , hidden_states=a )
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = RegNetConfig
_UpperCamelCase : int = 'regnet'
_UpperCamelCase : Optional[int] = 'pixel_values'
_UpperCamelCase : str = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : Any )-> List[str]:
"""simple docstring"""
if isinstance(a , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(a , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Any , a : Optional[int]=False )-> Any:
"""simple docstring"""
if isinstance(a , a ):
lowercase__ = value
lowercase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : Tuple )-> Tuple:
"""simple docstring"""
super().__init__(a )
lowercase__ = config
lowercase__ = RegNetEmbeddings(a )
lowercase__ = RegNetEncoder(a )
lowercase__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Tensor , a : Optional[bool] = None , a : Optional[bool] = None )-> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.embedder(a )
lowercase__ = self.encoder(
a , output_hidden_states=a , return_dict=a )
lowercase__ = encoder_outputs[0]
lowercase__ = self.pooler(a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : int )-> Dict:
"""simple docstring"""
super().__init__(a )
lowercase__ = config.num_labels
lowercase__ = RegNetModel(a )
# classification head
lowercase__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[torch.FloatTensor] = None , a : Optional[torch.LongTensor] = None , a : Optional[bool] = None , a : Optional[bool] = None , )-> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(a , output_hidden_states=a , return_dict=a )
lowercase__ = outputs.pooler_output if return_dict else outputs[1]
lowercase__ = self.classifier(a )
lowercase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ = 'single_label_classification'
else:
lowercase__ = 'multi_label_classification'
if self.config.problem_type == "regression":
lowercase__ = MSELoss()
if self.num_labels == 1:
lowercase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__ = loss_fct(a , a )
elif self.config.problem_type == "single_label_classification":
lowercase__ = CrossEntropyLoss()
lowercase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ = BCEWithLogitsLoss()
lowercase__ = loss_fct(a , a )
if not return_dict:
lowercase__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a , logits=a , hidden_states=outputs.hidden_states )
| 269 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """spiece.model"""}
lowercase_ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowercase_ = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , a : Tuple , a : Optional[int]=False , a : str=False , a : str=False , a : Tuple=None , a : Any=None , a : Union[str, Any]=None , a : Union[str, Any]=None , a : Optional[Dict[str, Any]] = None , **a : Optional[int] , )-> None:
"""simple docstring"""
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
lowercase__ = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowercase__ = '<|endoftext|>' if eos_token is None else eos_token
lowercase__ = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowercase__ = unk_token if pad_token is None else pad_token
lowercase__ = eos_token if bos_token is None else bos_token
else:
lowercase__ = '<pad>' if pad_token is None else pad_token
lowercase__ = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
# Used for whitespace normalization in input texts
# fmt : off
lowercase__ = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowercase__ = re.compile(
f"""[{"".join(map(a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : Any )-> str:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : int , a : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str )-> str:
"""simple docstring"""
lowercase__ = self.non_printing_characters_re.sub('' , a )
# Normalize whitespaces
lowercase__ = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
lowercase__ = unicodedata.normalize('NFC' , a )
return text
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , **a : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = self.preprocess_text(a )
return self.sp_model.encode(a , out_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> int:
"""simple docstring"""
return self.sp_model.PieceToId(a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> str:
"""simple docstring"""
return self.sp_model.IdToPiece(a )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : str )-> str:
"""simple docstring"""
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[str] )-> str:
"""simple docstring"""
lowercase__ = []
lowercase__ = ''
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(a )
lowercase__ = False
out_string += self.sp_model.decode(a )
return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict[str, int]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Any , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Union[str, List[str]] , a : Union[str, bool] = False )-> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(a , a ):
lowercase__ = self.preprocess_text(a )
lowercase__ = self.sp_model.encode(a )
else:
lowercase__ = [self.preprocess_text(a ) for t in text]
lowercase__ = self.sp_model.encode(a )
if return_tensors is True or return_tensors == "pt":
lowercase__ = torch.tensor(a )
return token_ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[int, List[int]] )-> str:
"""simple docstring"""
return self.sp_model.decode(a )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : "Conversation" )-> List[int]:
"""simple docstring"""
lowercase__ = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowercase__ = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(a ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=a )
| 269 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : int=False) -> int:
'''simple docstring'''
__lowercase = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias"""))
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
return rename_keys
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : List[str], UpperCamelCase_ : Optional[Any]=False) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers):
if base_model:
__lowercase = ""
else:
__lowercase = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
__lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[
: config.hidden_size, :
]
__lowercase = in_proj_bias[: config.hidden_size]
__lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase = in_proj_weight[
-config.hidden_size :, :
]
__lowercase = in_proj_bias[-config.hidden_size :]
def _A ( UpperCamelCase_ : Tuple) -> Dict:
'''simple docstring'''
__lowercase = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_, UpperCamelCase_)
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Optional[int], UpperCamelCase_ : Dict) -> int:
'''simple docstring'''
__lowercase = dct.pop(UpperCamelCase_)
__lowercase = val
def _A ( ) -> Optional[int]:
'''simple docstring'''
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(UpperCamelCase_, stream=UpperCamelCase_).raw)
return im
@torch.no_grad()
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : Any, UpperCamelCase_ : str=True) -> Any:
'''simple docstring'''
__lowercase = ViTConfig()
# patch_size
if model_name[-1] == "8":
__lowercase = 8
# set labels if required
if not base_model:
__lowercase = 1000
__lowercase = "huggingface/label-files"
__lowercase = "imagenet-1k-id2label.json"
__lowercase = json.load(open(hf_hub_download(UpperCamelCase_, UpperCamelCase_, repo_type="dataset"), "r"))
__lowercase = {int(UpperCamelCase_): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__lowercase = 384
__lowercase = 1536
__lowercase = 12
__lowercase = 6
# load original model from torch hub
__lowercase = torch.hub.load("facebookresearch/dino:main", UpperCamelCase_)
original_model.eval()
# load state_dict of original model, remove and rename some keys
__lowercase = original_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase_)
__lowercase = create_rename_keys(UpperCamelCase_, base_model=UpperCamelCase_)
for src, dest in rename_keys:
rename_key(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
read_in_q_k_v(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
# load HuggingFace model
if base_model:
__lowercase = ViTModel(UpperCamelCase_, add_pooling_layer=UpperCamelCase_).eval()
else:
__lowercase = ViTForImageClassification(UpperCamelCase_).eval()
model.load_state_dict(UpperCamelCase_)
# Check outputs on an image, prepared by ViTImageProcessor
__lowercase = ViTImageProcessor()
__lowercase = image_processor(images=prepare_img(), return_tensors="pt")
__lowercase = encoding["pixel_values"]
__lowercase = model(UpperCamelCase_)
if base_model:
__lowercase = original_model(UpperCamelCase_)
assert torch.allclose(UpperCamelCase_, outputs.last_hidden_state[:, 0, :], atol=1E-1)
else:
__lowercase = original_model(UpperCamelCase_)
assert logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase_, outputs.logits, atol=1E-3)
Path(UpperCamelCase_).mkdir(exist_ok=UpperCamelCase_)
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(UpperCamelCase_)
print(F"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(UpperCamelCase_)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 17 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __lowerCAmelCase ( lowerCamelCase__ ):
# to overwrite at feature extractactor specific tests
__lowerCamelCase = None
__lowerCamelCase = None
@property
def snake_case ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_snake_case , """feature_size""" ) )
self.assertTrue(hasattr(_snake_case , """sampling_rate""" ) )
self.assertTrue(hasattr(_snake_case , """padding_value""" ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case )
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
_lowerCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = self.feat_extract_tester.seq_length_diff
_lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
_lowerCAmelCase = self.feat_extract_tester.min_seq_length
_lowerCAmelCase = self.feat_extract_tester.batch_size
_lowerCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" )[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , pad_to_multiple_of=10 )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
_lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def snake_case ( self , _snake_case=False ):
"""simple docstring"""
def _inputs_have_equal_length(_snake_case ):
_lowerCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_snake_case ) != length:
return False
return True
def _inputs_are_equal(_snake_case , _snake_case ):
if len(_snake_case ) != len(_snake_case ):
return False
for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ):
if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1e-3 ):
return False
return True
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case )
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to smallest with np
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
# truncate to middle
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors="""np""" , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_snake_case )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_lowerCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""longest""" , truncation=_snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_snake_case ):
feat_extract.pad(_snake_case , padding="""max_length""" , truncation=_snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCAmelCase = 12
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , )
_lowerCAmelCase = input_a[input_name]
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , )
_lowerCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_lowerCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_snake_case ) )
self.assertFalse(_inputs_have_equal_length(_snake_case ) )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_padding(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._check_truncation(numpify=_snake_case )
@require_torch
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = feat_extract.pad(_snake_case , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.feat_extract_dict
_lowerCAmelCase = True
_lowerCAmelCase = self.feature_extraction_class(**_snake_case )
_lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_lowerCAmelCase = [len(_snake_case ) for x in speech_inputs]
_lowerCAmelCase = feat_extract.model_input_names[0]
_lowerCAmelCase = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase = min(_snake_case )
_lowerCAmelCase = feat_extract.pad(
_snake_case , padding="""max_length""" , max_length=_snake_case , truncation=_snake_case , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : Any = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'decision_transformer'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=17 , _a=4 , _a=128 , _a=4_096 , _a=True , _a=1 , _a=1_024 , _a=3 , _a=1 , _a=None , _a="relu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1e-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=False , _a=False , **_a , ):
__magic_name__ : Optional[Any] = state_dim
__magic_name__ : Optional[Any] = act_dim
__magic_name__ : Any = hidden_size
__magic_name__ : Union[str, Any] = max_ep_len
__magic_name__ : Optional[int] = action_tanh
__magic_name__ : Tuple = vocab_size
__magic_name__ : Tuple = n_positions
__magic_name__ : Dict = n_layer
__magic_name__ : Optional[int] = n_head
__magic_name__ : Any = n_inner
__magic_name__ : Union[str, Any] = activation_function
__magic_name__ : List[str] = resid_pdrop
__magic_name__ : str = embd_pdrop
__magic_name__ : List[str] = attn_pdrop
__magic_name__ : Any = layer_norm_epsilon
__magic_name__ : Tuple = initializer_range
__magic_name__ : Union[str, Any] = scale_attn_weights
__magic_name__ : Tuple = use_cache
__magic_name__ : List[Any] = scale_attn_by_inverse_layer_idx
__magic_name__ : Optional[int] = reorder_and_upcast_attn
__magic_name__ : List[str] = bos_token_id
__magic_name__ : Any = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
| 41 |
def lowerCAmelCase_ ( _snake_case : int ) -> bool:
'''simple docstring'''
if not isinstance(_snake_case , _snake_case ):
__magic_name__ : Union[str, Any] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_snake_case )
if number < 0:
return False
__magic_name__ : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : str = """▁"""
lowercase : Dict = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
lowercase : Optional[Any] = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
lowercase : str = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
lowercase : int = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
lowercase : List[Any] = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class __snake_case ( lowerCAmelCase ):
_a : List[str]= ["input_ids"]
_a : Optional[Any]= VOCAB_FILES_NAMES
_a : Tuple= PRETRAINED_INIT_CONFIGURATION
_a : Optional[int]= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int= PRETRAINED_VOCAB_FILES_MAP
_a : Dict= RESOURCE_FILES_NAMES
def __init__( self ,snake_case ,snake_case=None ,snake_case=False ,snake_case="utf8" ,snake_case="[UNK]" ,snake_case="[SEP]" ,snake_case="[PAD]" ,snake_case="[CLS]" ,snake_case="[MASK]" ,snake_case = None ,**snake_case ,):
'''simple docstring'''
lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case ,unk_token=snake_case ,sep_token=snake_case ,pad_token=snake_case ,cls_token=snake_case ,mask_token=snake_case ,vocab_file=snake_case ,encoding=snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**snake_case ,)
lowercase : List[str] = do_lower_case
lowercase : List[Any] = sentencepiece_model_ckpt
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase : Dict = self.load_vocab(filepath=snake_case )
else:
lowercase : Dict = {self.sp_model.id_to_piece(snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowercase : Optional[int] = {v: k for k, v in self.vocab.items()}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if text is None:
return None
lowercase : Optional[Any] = self.tokenize(snake_case )
lowercase , lowercase : Tuple = """""", []
for i, ch in enumerate(snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowercase : Tuple = self.SP_CHAR_MAPPING.get(snake_case )
else:
lowercase : str = unicodedata.normalize("""NFKC""" ,snake_case )
if self.is_whitespace(snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case ) )
lowercase , lowercase , lowercase : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
lowercase : str = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase : Optional[int] = token[1:]
lowercase : Optional[int] = text[offset:].index(snake_case ) + offset
lowercase : Union[str, Any] = start + len(snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase : List[Any] = end
return token_mapping
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return len(self.vocab )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return dict(self.vocab ,**self.added_tokens_encoder )
def __getstate__( self ):
'''simple docstring'''
lowercase : List[str] = self.__dict__.copy()
lowercase : Optional[int] = None
return state
def __setstate__( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowercase : List[Any] = {}
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(snake_case ,snake_case ) for c in text) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=False ,snake_case=64 ,snake_case=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
lowercase : Union[str, Any] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
lowercase : Optional[int] = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
lowercase : int = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
lowercase : Tuple = self.sp_model.EncodeAsPieces(snake_case )
else:
lowercase : Tuple = self.sp_model.SampleEncodeAsPieces(snake_case ,snake_case ,snake_case )
lowercase : Any = []
for pi, piece in enumerate(snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case ) and pi != 0:
new_pieces.append(snake_case )
continue
else:
continue
lowercase : Optional[Any] = 0
for i, chunk in enumerate(snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case ) or self.is_punct(snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case )
lowercase : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase : List[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase : List[Any] = i
if len(snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : int = """""".join(snake_case ).replace(snake_case ,""" """ ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = self.convert_ids_to_tokens(snake_case )
lowercase : Any = """""".join(snake_case ).replace(snake_case ,""" """ ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.vocab.get(snake_case ,self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.reverse_vocab.get(snake_case ,self.unk_token )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : int = [self.cls_token_id]
lowercase : Union[str, Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,snake_case=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case ) + 1) + [1] * (len(snake_case ) + 3)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case ) == 1:
lowercase : Optional[Any] = unicodedata.category(snake_case )
if cat == "Zs":
return True
return False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = {}
with io.open(snake_case ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(snake_case ):
lowercase : Any = line.rstrip("""\n""" )
lowercase : Union[str, Any] = int(snake_case )
return token_to_idx
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Dict = 0
if os.path.isdir(snake_case ):
lowercase : int = os.path.join(
snake_case ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
lowercase : List[str] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() ,key=lambda snake_case : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
lowercase : Any = token_index
writer.write(token + """\n""" )
index += 1
lowercase : List[Any] = os.path.join(snake_case ,"""sentencepiece.bpe.model""" )
with open(snake_case ,"""wb""" ) as fi:
lowercase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (vocab_file,)
| 20 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCAmelCase__ : List[Any] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCAmelCase__ : int = text_generator("""This is a test""" , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
] , )
UpperCAmelCase__ : Optional[int] = text_generator.model.config.eos_token_id
UpperCAmelCase__ : Any = """<pad>"""
UpperCAmelCase__ : Any = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , )
self.assertEqual(
_lowerCamelCase , [
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
] , )
@require_tf
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCAmelCase__ : Dict = text_generator(["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = TextGenerationPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase )
return text_generator, ["This is a test", "Another test"]
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """Hello I believe in"""
UpperCAmelCase__ : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
UpperCAmelCase__ : int = text_generator(_lowerCamelCase , stop_sequence=""" fe""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = text_generator.model
UpperCAmelCase__ : Union[str, Any] = text_generator.tokenizer
UpperCAmelCase__ : Any = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : List[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : int = pipeline(task="""text-generation""" , model=_lowerCamelCase , tokenizer=_lowerCamelCase , return_full_text=_lowerCamelCase )
UpperCAmelCase__ : Dict = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : Optional[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCAmelCase__ : Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : List[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_text=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Optional[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Any = text_generator("""test""" , return_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCAmelCase__ : Dict = text_generator("""""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCAmelCase__ : str = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCAmelCase__ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCAmelCase__ : str = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_lowerCamelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
UpperCAmelCase__ : str = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : List[str] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
UpperCAmelCase__ : int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : Any = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCAmelCase__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCAmelCase__ : Optional[int] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=_lowerCamelCase , top_p=0.5 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = """Hello world"""
UpperCAmelCase__ : str = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
UpperCAmelCase__ : Any = logging.get_logger("""transformers.generation.tf_utils""" )
else:
UpperCAmelCase__ : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" )
UpperCAmelCase__ : Optional[int] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : List[str] = text_generator(_lowerCamelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowerCamelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase , max_new_tokens=1 )
self.assertNotIn(_lowerCamelCase , cl.out )
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Optional[Any] = text_generator(_lowerCamelCase , max_length=10 )
self.assertNotIn(_lowerCamelCase , cl.out )
| 171 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : int , snake_case : Optional[int] , snake_case : Optional[int] )-> str:
'''simple docstring'''
if isinstance(snake_case , snake_case ):
UpperCAmelCase__ : Dict = np.full((len(snake_case ), sequence_length, 2) , snake_case )
else:
UpperCAmelCase__ : List[str] = np.full((len(snake_case ), sequence_length) , snake_case )
for i, tensor in enumerate(snake_case ):
if padding_side == "right":
if isinstance(snake_case , snake_case ):
UpperCAmelCase__ : List[str] = tensor[:sequence_length]
else:
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
if isinstance(snake_case , snake_case ):
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
return out_tensor.tolist()
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ord(snake_case )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[int] = unicodedata.category(snake_case )
if cat.startswith("P" ):
return True
return False
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =True
SCREAMING_SNAKE_CASE_ =None
SCREAMING_SNAKE_CASE_ =None
SCREAMING_SNAKE_CASE_ =-100
SCREAMING_SNAKE_CASE_ ="pt"
def __a ( self : Union[str, Any] , snake_case__ : Any ):
'''simple docstring'''
import torch
UpperCAmelCase__ : List[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : Any = self.tokenizer.pad(
snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Any = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : List[Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : List[str] = [
list(snake_case__ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) for label in labels
]
else:
UpperCAmelCase__ : str = [
[self.label_pad_token_id] * (sequence_length - len(snake_case__ )) + list(snake_case__ ) for label in labels
]
UpperCAmelCase__ : Dict = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : str = padding_tensor(snake_case__ , -1 , snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[int] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : str = padding_tensor(snake_case__ , (-1, -1) , snake_case__ , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = {k: torch.tensor(snake_case__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 298 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
SCREAMING_SNAKE_CASE_ =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 298 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_snake_case : Union[str, Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = AudioClassificationPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
# test with a raw waveform
_UpperCamelCase = np.zeros((34000,) )
_UpperCamelCase = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = examples
_UpperCamelCase = audio_classifier(lowerCAmelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCAmelCase__ , [
{'''score''': ANY(lowerCAmelCase__ ), '''label''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''label''': ANY(lowerCAmelCase__ )},
] , )
_UpperCamelCase = audio_classifier(lowerCAmelCase__ , top_k=1 )
self.assertEqual(
lowerCAmelCase__ , [
{'''score''': ANY(lowerCAmelCase__ ), '''label''': ANY(lowerCAmelCase__ )},
] , )
self.run_torchaudio(lowerCAmelCase__ )
@require_torchaudio
def snake_case__ ( self : int , lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
import datasets
# test with a local file
_UpperCamelCase = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
_UpperCamelCase = dataset[0]['''audio''']['''array''']
_UpperCamelCase = audio_classifier(lowerCAmelCase__ )
self.assertEqual(
lowerCAmelCase__ , [
{'''score''': ANY(lowerCAmelCase__ ), '''label''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''label''': ANY(lowerCAmelCase__ )},
] , )
@require_torch
def snake_case__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = '''anton-l/wav2vec2-random-tiny-classifier'''
_UpperCamelCase = pipeline('''audio-classification''' , model=lowerCAmelCase__ )
_UpperCamelCase = np.ones((8000,) )
_UpperCamelCase = audio_classifier(lowerCAmelCase__ , top_k=4 )
_UpperCamelCase = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
_UpperCamelCase = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_UpperCamelCase = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
_UpperCamelCase = audio_classifier(lowerCAmelCase__ , top_k=4 )
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
import datasets
_UpperCamelCase = '''superb/wav2vec2-base-superb-ks'''
_UpperCamelCase = pipeline('''audio-classification''' , model=lowerCAmelCase__ )
_UpperCamelCase = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
_UpperCamelCase = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
_UpperCamelCase = audio_classifier(lowerCAmelCase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
pass
| 324 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = LevitImageProcessor if is_vision_available() else None
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = LevitImageProcessingTester(self )
@property
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 324 | 1 |
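For context, the audio-classification pipeline exercised at the top of this row can also be driven directly. A minimal sketch, using the same `anton-l/wav2vec2-random-tiny-classifier` checkpoint the test loads (scores from this tiny random model are essentially noise):

```python
import numpy as np
from transformers import pipeline

audio_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")

# A raw waveform is just a 1-D float array; the test uses np.ones((8000,)).
waveform = np.ones((8000,), dtype=np.float32)
print(audio_classifier(waveform, top_k=4))

# Passing an explicit sampling rate alongside the array, as the test also does:
inputs = {"array": waveform, "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
print(audio_classifier(inputs, top_k=4))
```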
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : Any =logging.get_logger(__name__)
# TODO: upload to AWS
__snake_case : Any ={
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCamelCase__ ( __lowerCamelCase):
'''simple docstring'''
snake_case_ ='retribert'
def __init__(self ,__lowerCamelCase=3_05_22 ,__lowerCamelCase=7_68 ,__lowerCamelCase=8 ,__lowerCamelCase=12 ,__lowerCamelCase=30_72 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=1e-12 ,__lowerCamelCase=True ,__lowerCamelCase=1_28 ,__lowerCamelCase=0 ,**__lowerCamelCase ,) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ ,**UpperCamelCase_ )
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Union[str, Any] = type_vocab_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : List[Any] = share_encoders
lowerCAmelCase__ : str = projection_dim
| 357 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__snake_case : Dict =HfArgumentParser(InitializationArguments)
__snake_case : Tuple =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__snake_case : Optional[int] =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__snake_case : List[str] ={
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
__snake_case : List[Any] =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__snake_case : int =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 94 | 0 |
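The RetriBERT configuration class at the top of this row follows the standard `PretrainedConfig` recipe: forward shared arguments to `super().__init__` and store the rest as plain attributes. A minimal sketch with hypothetical names (`TinyRetrieverConfig` is illustrative, not a real transformers class):

```python
from transformers import PretrainedConfig

class TinyRetrieverConfig(PretrainedConfig):
    model_type = "tiny-retriever"  # plays the same role as "retribert" above

    def __init__(self, vocab_size=30522, hidden_size=768, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim

config = TinyRetrieverConfig(hidden_size=256)
print(config.to_json_string())  # configs round-trip through JSON for save_pretrained/from_pretrained
```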
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__snake_case : Dict = False
__snake_case : str = True
__snake_case : int = False
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : Optional[Any] = parser.parse_args()
__snake_case : List[str] = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__snake_case : Union[str, Any] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__snake_case : Dict = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__snake_case : Optional[int] = reader.read()
__snake_case : Optional[int] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__snake_case : Union[str, Any] = UNetaDModel(**config)
else:
__snake_case : Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__snake_case : int = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__snake_case : str = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__snake_case : List[str] = config[key]
del config[key]
__snake_case : Tuple = [k.replace('UNetRes', '') for k in config['down_block_types']]
__snake_case : Union[str, Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__snake_case : Union[str, Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__snake_case : Union[str, Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__snake_case : int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__snake_case : Union[str, Any] = param_value
__snake_case : Dict = True
if not has_changed:
__snake_case : Tuple = param_value
model.load_state_dict(new_state_dict)
    model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 269 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class A__ :
'''simple docstring'''
def __init__( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = psutil.Process()
__lowerCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = -1
while True:
__lowerCAmelCase : str = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : str = threading.Thread(target=self.peak_monitor)
__lowerCAmelCase : Tuple = True
self.thread.start()
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = False
self.thread.join()
return self.cpu_memory_peak
__snake_case : Tuple = PeakCPUMemory()
def _lowercase ( ) -> str:
# Time
__lowerCAmelCase : str = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : Optional[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = torch.cuda.memory_allocated(__snake_case )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowercase ( __snake_case ) -> Optional[Any]:
# Time
__lowerCAmelCase : str = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : str = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase : List[str] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = (torch.cuda.memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
__lowerCAmelCase : Any = (torch.cuda.max_memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@slow
@require_torch
def __lowercase ( self ) -> Dict:
_a : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_a : str = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_a : str = bertabert.config.encoder.vocab_size
_a : int = tokenizer.sep_token_id
_a : List[Any] = tokenizer.cls_token_id
_a : Tuple = 1_2_8
_a : Union[str, Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_a : Optional[int] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_a : Union[str, Any] = train_dataset.select(range(3_2 ) )
_a : List[Any] = val_dataset.select(range(1_6 ) )
_a : Tuple = 4
def _map_to_encoder_decoder_inputs(_a ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_a : List[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_a , max_length=5_1_2 )
_a : List[Any] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_a , max_length=1_2_8 )
_a : Any = inputs.input_ids
_a : List[str] = inputs.attention_mask
_a : List[Any] = outputs.input_ids
_a : Any = outputs.input_ids.copy()
_a : Tuple = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_a : Optional[int] = outputs.attention_mask
            assert all(len(x ) == 5_1_2 for x in inputs.input_ids )
            assert all(len(x ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(_a ):
_a : int = pred.label_ids
_a : int = pred.predictions
# all unnecessary tokens are removed
_a : int = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_a : Union[str, Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a )
            _a : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
_a : List[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_a : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_a : Union[str, Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = SeqaSeqTrainingArguments(
output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy='''steps''' , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_a : str = SeqaSeqTrainer(
model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , )
# start training
trainer.train()
| 367 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 0 |
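One detail of the encoder-decoder fine-tuning test earlier in this row is worth isolating: label positions equal to the pad token are replaced with -100, the default `ignore_index` of PyTorch's cross-entropy loss, so padding contributes nothing to training. A standalone sketch of that masking step:

```python
import torch

pad_token_id = 0
labels = torch.tensor([[5, 9, 2, 0, 0],
                       [7, 3, 0, 0, 0]])

# Positions holding the pad token are ignored by CrossEntropyLoss(ignore_index=-100).
masked = labels.masked_fill(labels == pad_token_id, -100)
print(masked)  # pads become -100, real tokens are untouched
```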
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 1000 ) -> int:
lowerCamelCase__ : str = -1
lowerCamelCase__ : Dict = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowerCamelCase__ : Dict = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowerCamelCase__ : Any = n - a - b
if c * c == (a * a + b * b):
lowerCamelCase__ : Dict = a * b * c
if candidate >= product:
lowerCamelCase__ : Union[str, Any] = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
| 41 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 41 | 1 |
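A readable version of the triplet search at the top of this row, under the same algebra: with a + b + c = n and a**2 + b**2 = c**2, eliminating c gives b = (n*n - 2*a*n) / (2*n - 2*a), so only a needs to be scanned:

```python
def max_triplet_product(n: int = 1000) -> int:
    """Largest a*b*c over Pythagorean triplets (a**2 + b**2 == c**2) with a + b + c == n."""
    best = -1
    for a in range(1, n // 3):
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)  # from a+b+c=n and a^2+b^2=c^2
        c = n - a - b
        if a * a + b * b == c * c:  # integer division may have rounded, so verify
            best = max(best, a * b * c)
    return best

print(max_triplet_product(1000))  # 31875000, from the triplet (200, 375, 425)
```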
from __future__ import annotations
from collections.abc import Generator
def __UpperCAmelCase ( ) -> Generator[int, None, None]:
"""simple docstring"""
_a : dict[int, int] = {}
_a : int = 2
while True:
_a : Union[str, Any] = factor_map.pop(__a ,__a )
if factor:
_a : str = factor + prime
while x in factor_map:
x += factor
_a : Any = factor
else:
_a : Tuple = prime
yield prime
prime += 1
def __UpperCAmelCase ( __a : float = 1E10 ) -> int:
"""simple docstring"""
_a : Optional[int] = sieve()
_a : List[str] = 1
while True:
_a : Optional[int] = next(__a )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__a )
n += 2
if __name__ == "__main__":
print(solution())
| 15 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__a ,__a ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 | 1 |
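The generator at the top of this row is an incremental (unbounded) sieve: each known prime is parked in a dictionary at the next composite it divides, and any candidate absent from the dictionary is prime. The same idea with descriptive names:

```python
from itertools import islice
from typing import Dict, Iterator

def primes() -> Iterator[int]:
    """Yield primes indefinitely; factor_map sends an upcoming composite to a prime factor."""
    factor_map: Dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            # Composite: re-park the witnessing factor at its next free multiple.
            nxt = candidate + factor
            while nxt in factor_map:
                nxt += factor
            factor_map[nxt] = factor
        else:
            # Prime: the first composite it witnesses is its square.
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

print(list(islice(primes(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```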
'''simple docstring'''
import random
from typing import Any
def __lowerCAmelCase ( snake_case__ ):
for _ in range(len(snake_case__ ) ):
__UpperCamelCase : Union[str, Any] = random.randint(0 , len(snake_case__ ) - 1 )
__UpperCamelCase : Tuple = random.randint(0 , len(snake_case__ ) - 1 )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
_lowerCAmelCase = [0, 1, 2, 3, 4, 5, 6, 7]
_lowerCAmelCase = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 298 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = BlenderbotSmallTokenizer
A = False
def a_ (self ) -> List[str]:
super().setUp()
__UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCAmelCase ) )
def a_ (self , **_UpperCAmelCase ) -> Dict:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a_ (self , _UpperCAmelCase ) -> str:
__UpperCamelCase : List[Any] = "adapt act apte"
__UpperCamelCase : Dict = "adapt act apte"
return input_text, output_text
def a_ (self ) -> int:
__UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = "adapt act apte"
__UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
__UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__UpperCamelCase : Dict = "I am a small frog."
__UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__UpperCamelCase : Tuple = "I am a small frog ."
__UpperCamelCase : List[str] = "."
__UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
__UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 298 | 1 |
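A caveat on the shuffle at the top of this row: it swaps two uniformly random positions len(data) times, which is not the classic Fisher-Yates procedure and is not guaranteed to weight all permutations equally. The textbook algorithm (also what `random.shuffle` implements) walks the list once, swapping each position with a random index at or below it:

```python
import random

def fisher_yates_shuffle(data: list) -> list:
    """Uniform in-place shuffle in a single pass."""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive bounds: j may equal i
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))
```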
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_snake_case = get_tests_dir("fixtures")
_snake_case = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_snake_case = get_tests_dir("fixtures/dummy-config.json")
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[str]:
_A : Optional[int] = 0
def a__ ( self ) -> List[str]:
_A : int = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Tuple:
_A : Dict = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_A : str = AutoFeatureExtractor.from_pretrained(_a ).to_dict()
config_dict.pop("""feature_extractor_type""" )
_A : Optional[int] = WavaVecaFeatureExtractor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_A : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Optional[Any]:
_A : int = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> int:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : Optional[Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> Any:
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
_A : int = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def a__ ( self ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : Tuple = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
_A : Dict = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[str] = AutoFeatureExtractor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def a__ ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoFeatureExtractor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
_A : List[str] = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_a )
_A : List[Any] = AutoFeatureExtractor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
class lowercase ( UpperCamelCase__ ):
_a = True
try:
AutoConfig.register("""custom""" , _a )
AutoFeatureExtractor.register(_a , _a )
# If remote code is not set, the default is to use local
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_A : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_A : List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=_a )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 343 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
create_state_space_tree(snake_case_,[],0,[0 for i in range(len(snake_case_ ) )] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,):
if index == len(snake_case_ ):
print(snake_case_ )
return
for i in range(len(snake_case_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_A : Optional[Any] = True
create_state_space_tree(snake_case_,snake_case_,index + 1,snake_case_ )
current_sequence.pop()
_A : str = False
_snake_case = [3, 1, 2, 4]
generate_all_permutations(sequence)
_snake_case = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 343 | 1 |
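The permutation generator just above explores a state-space tree with a used-index mask; a compact, self-contained sketch of the same backtracking pattern:

```python
from typing import List

def permutations(seq: List[int]) -> List[List[int]]:
    """All permutations of seq via backtracking with a used-index mask."""
    result: List[List[int]] = []

    def backtrack(current: List[int], used: List[bool]) -> None:
        if len(current) == len(seq):
            result.append(current.copy())
            return
        for i in range(len(seq)):
            if not used[i]:
                used[i] = True
                current.append(seq[i])
                backtrack(current, used)
                current.pop()   # undo the choice before trying the next index
                used[i] = False

    backtrack([], [False] * len(seq))
    return result

print(permutations([3, 1, 2]))  # all 6 orderings
```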
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCamelCase ():
__a : Optional[int] = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=_SCREAMING_SNAKE_CASE )
__a : Any = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Parse args
__a , __a : Dict = parser.parse_known_args()
if not hasattr(_SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
__a : List[str] = parse_unknown_args(_SCREAMING_SNAKE_CASE )
# Run
__a : Optional[Any] = args.func(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 27 |
import math
class _snake_case :
def __init__( self , _lowerCamelCase=0 ): # a graph with Node 0,1,...,N-1
a :Optional[int] = n
a :Union[str, Any] = [
[math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase )
] # adjacency matrix for weight
a :List[Any] = [
[math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase )
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Tuple = w
def SCREAMING_SNAKE_CASE__ ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
a :Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
return self.dp[u][v]
if __name__ == "__main__":
snake_case : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 94 | 0 |
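The Floyd-Warshall implementation just above reduces to the triple loop dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]); a self-contained sketch over a plain weight matrix:

```python
import math

def floyd_warshall(weight):
    """All-pairs shortest paths; weight[i][j] is an edge weight or math.inf."""
    n = len(weight)
    dist = [row[:] for row in weight]  # copy so the input is untouched
    for k in range(n):                 # k = largest intermediate vertex allowed
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

INF = math.inf
w = [[0, 9, INF],
     [INF, 0, 5],
     [2, INF, 0]]
print(floyd_warshall(w))  # shortest 0 -> 2 becomes 9 + 5 = 14
```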
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
snake_case__ = tuple[int, int]
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : set[int] , _lowerCamelCase : Mapping[EdgeT, int] ):
"""simple docstring"""
A_ : set[int] = vertices
A_ : dict[EdgeT, int] = {
(min(_lowerCamelCase ), max(_lowerCamelCase )): weight for edge, weight in edges.items()
}
def _a ( self : Any , _lowerCamelCase : EdgeT , _lowerCamelCase : int ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
A_ : List[Any] = weight
def _a ( self : Dict ):
"""simple docstring"""
A_ : Graph = Graph({min(self.vertices )} , {} )
A_ : EdgeT
A_ : int
A_ : EdgeT
A_ : int
while len(subgraph.vertices ) < len(self.vertices ):
A_ : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
A_ : Optional[Any] = edge
A_ : Union[str, Any] = weight
subgraph.add_edge(_lowerCamelCase , _lowerCamelCase )
return subgraph
def snake_case__ ( lowerCamelCase__ : str = "p107_network.txt" ) -> int:
A_ : str = os.path.abspath(os.path.dirname(lowerCamelCase__ ) )
A_ : str = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
A_ : dict[EdgeT, int] = {}
A_ : list[str]
A_ : int
A_ : int
with open(lowerCamelCase__ ) as f:
A_ : Any = f.read().strip().split('''\n''' )
A_ : Tuple = [line.split(''',''' ) for line in data]
    for edgea in range(1 , len(adjacency_matrix ) ):
        for edgeb in range(edgea ):
            if adjacency_matrix[edgea][edgeb] != "-":
                A_ : Union[str, Any] = int(adjacency_matrix[edgea][edgeb] )
A_ : Graph = Graph(set(range(len(lowerCamelCase__ ) ) ) , lowerCamelCase__ )
A_ : Graph = graph.prims_algorithm()
A_ : int = sum(graph.edges.values() )
A_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
| 368 |
'''simple docstring'''
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ : Union[str, Any] = val
A_ : Tuple = None
A_ : Any = None
def _a ( self : Tuple , _lowerCamelCase : List[Any] ):
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
A_ : int = Node(_lowerCamelCase )
else:
self.left.insert(_lowerCamelCase )
elif val > self.val:
if self.right is None:
A_ : List[str] = Node(_lowerCamelCase )
else:
self.right.insert(_lowerCamelCase )
else:
A_ : Any = val
def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str:
# Recursive traversal
if root:
inorder(root.left , lowerCamelCase__ )
res.append(root.val )
inorder(root.right , lowerCamelCase__ )
def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple:
# Build BST
if len(lowerCamelCase__ ) == 0:
return arr
A_ : Dict = Node(arr[0] )
for i in range(1 , len(lowerCamelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
A_ : Tuple = []
inorder(lowerCamelCase__ , lowerCamelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 4 | 0 |
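The Prim's step in the Project Euler 107 solution at the top of this row rescans every crossing edge per added vertex; with a priority queue the same tree growth runs in O(E log V). A heap-based sketch over an adjacency list (an alternative formulation, not the class above):

```python
import heapq
from typing import Dict, List, Tuple

def prim_mst_weight(adj: Dict[int, List[Tuple[int, int]]], start: int = 0) -> int:
    """Total minimum-spanning-tree weight; adj maps vertex -> [(neighbor, weight), ...]."""
    visited = {start}
    heap = [(weight, neighbor) for neighbor, weight in adj[start]]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(adj):
        weight, vertex = heapq.heappop(heap)
        if vertex in visited:
            continue  # stale entry for a vertex claimed via a cheaper edge
        visited.add(vertex)
        total += weight
        for neighbor, w in adj[vertex]:
            if neighbor not in visited:
                heapq.heappush(heap, (w, neighbor))
    return total

graph = {0: [(1, 4), (2, 1)], 1: [(0, 4), (2, 2)], 2: [(0, 1), (1, 2)]}
print(prim_mst_weight(graph))  # 3: the MST keeps edges 0-2 (1) and 2-1 (2)
```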
from collections.abc import Callable
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase = None) -> Union[str, Any]:
# Stores actual heap items.
__UpperCamelCase :List[Any] = []
# Stores indexes of each item for supporting updates and deletion.
__UpperCamelCase :List[Any] = {}
# Stores current size of heap.
__UpperCamelCase :Union[str, Any] = 0
        # Stores the function used to compute each item's score; heap ordering is
        # based on this score.
        __UpperCamelCase :List[Any] = key or (lambda x: x)
def UpperCamelCase__ ( self , __lowercase) -> Any:
return int((i - 1) / 2) if i > 0 else None
def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Any = int(2 * i + 1)
return left if 0 < left < self.size else None
def UpperCamelCase__ ( self , __lowercase) -> Tuple:
__UpperCamelCase :Optional[int] = int(2 * i + 2)
return right if 0 < right < self.size else None
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Tuple:
__UpperCamelCase , __UpperCamelCase :Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__UpperCamelCase , __UpperCamelCase :int = self.arr[j], self.arr[i]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Any:
return self.arr[i][1] < self.arr[j][1]
def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
__UpperCamelCase :Optional[int] = self._left(__lowercase)
__UpperCamelCase :Dict = self._right(__lowercase)
__UpperCamelCase :List[Any] = i
if left is not None and not self._cmp(__lowercase , __lowercase):
__UpperCamelCase :Optional[int] = left
if right is not None and not self._cmp(__lowercase , __lowercase):
__UpperCamelCase :Dict = right
return valid_parent
def UpperCamelCase__ ( self , __lowercase) -> Any:
__UpperCamelCase :Optional[Any] = self._parent(__lowercase)
while parent is not None and not self._cmp(__lowercase , __lowercase):
self._swap(__lowercase , __lowercase)
__UpperCamelCase , __UpperCamelCase :int = parent, self._parent(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = self._get_valid_parent(__lowercase)
while valid_parent != index:
self._swap(__lowercase , __lowercase)
__UpperCamelCase , __UpperCamelCase :List[Any] = valid_parent, self._get_valid_parent(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> int:
if item not in self.pos_map:
return
__UpperCamelCase :Any = self.pos_map[item]
__UpperCamelCase :Dict = [item, self.key(__lowercase)]
        # Make sure the heap is valid in both up and down directions.
        # Ideally only one of them will make any change.
self._heapify_up(__lowercase)
self._heapify_down(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Tuple:
if item not in self.pos_map:
return
__UpperCamelCase :Dict = self.pos_map[item]
del self.pos_map[item]
__UpperCamelCase :Optional[Any] = self.arr[self.size - 1]
__UpperCamelCase :Optional[Any] = index
self.size -= 1
        # Make sure the heap is valid in both up and down directions. Ideally only
        # one of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(__lowercase)
self._heapify_down(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[Any]:
__UpperCamelCase :Optional[Any] = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(__lowercase)])
else:
__UpperCamelCase :str = [item, self.key(__lowercase)]
__UpperCamelCase :int = self.size
self.size += 1
self._heapify_up(self.size - 1)
def UpperCamelCase__ ( self) -> List[Any]:
return self.arr[0] if self.size else None
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Any = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def lowerCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
SCREAMING_SNAKE_CASE :Union[str, Any] = False
SCREAMING_SNAKE_CASE :Any = True
SCREAMING_SNAKE_CASE :Tuple = False
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE :Dict = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
SCREAMING_SNAKE_CASE :Optional[int] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
SCREAMING_SNAKE_CASE :Dict = reader.read()
SCREAMING_SNAKE_CASE :List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config)
else:
SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
SCREAMING_SNAKE_CASE :List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
SCREAMING_SNAKE_CASE :List[str] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
SCREAMING_SNAKE_CASE :Optional[Any] = config[key]
del config[key]
SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
SCREAMING_SNAKE_CASE :Any = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
SCREAMING_SNAKE_CASE :List[str] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
SCREAMING_SNAKE_CASE :List[Any] = param_value
SCREAMING_SNAKE_CASE :str = True
if not has_changed:
SCREAMING_SNAKE_CASE :List[str] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 15 | 0 |
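The weight-renaming pass in the conversion script just above keys on the first dotted component of each parameter name. The same transformation in isolation, with a hypothetical two-entry mapping:

```python
import torch

key_map = {"downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks"}
state_dict = {
    "downsample_blocks.0.weight": torch.zeros(2, 2),
    "mid.weight": torch.zeros(2, 2),
}

renamed = {}
for name, tensor in state_dict.items():
    head, _, tail = name.partition(".")
    new_head = key_map.get(head, head)  # unknown prefixes pass through unchanged
    renamed[f"{new_head}.{tail}" if tail else new_head] = tensor

print(sorted(renamed))  # ['down_blocks.0.weight', 'mid.weight']
```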
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Tuple = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
_a = StableDiffusionPanoramaPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self: int ):
torch.manual_seed(0 )
lowercase :List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase :Any = DDIMScheduler()
torch.manual_seed(0 )
lowercase :Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase :Any = CLIPTextModel(_lowerCAmelCase )
lowercase :str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase :Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict=0 ):
lowercase :Any = torch.manual_seed(_lowerCAmelCase )
lowercase :Any = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
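
# Illustrative-only sketch of the peak-memory measurement pattern used in the
# test above; it assumes a generic diffusers pipeline `pipe`, CUDA being
# available, and the helper name is ours rather than a library API.
def measure_peak_memory_bytes(pipe, inputs):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    pipe.enable_attention_slicing(1)
    pipe.enable_sequential_cpu_offload()
    _ = pipe(**inputs)
    return torch.cuda.max_memory_allocated()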
| 158 | 0 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incrementally yield primes using a lazily-populated factor map."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite: advance its smallest factor to the next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the first odd index n for which 2 * p_n * n exceeds the limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
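
    # A minimal sanity check of the incremental sieve above (illustrative only):
    # take the first few primes from the generator and compare them against a
    # known prefix of the prime sequence.
    import itertools

    assert list(itertools.islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]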
| 15 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__)
class MockDownloadManager:
    """Mimics a datasets DownloadManager but resolves URLs against local or hosted dummy data."""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
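
# Illustrative sketch of the URL-to-local-name convention the manager above
# relies on: a dummy file's name is the URL's last path component, quoted so
# that query strings survive as file-system-safe names. The helper name is
# hypothetical, not part of the library.
def dummy_name_for_url(url: str) -> str:
    return urllib.parse.quote_plus(Path(url).name)


assert dummy_name_for_url("https://host/data/train.csv?rev=2") == "train.csv%3Frev%3D2"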
| 15 | 1 |
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Configuration for loading models with bitsandbytes 8-bit / 4-bit quantization."""

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
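
# A minimal usage sketch, assuming torch and the bitsandbytes package are
# installed (post_init enforces bitsandbytes>=0.39.0 for 4-bit): configure
# 4-bit NF4 loading and round-trip the config to JSON.
if __name__ == "__main__":
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
    )
    print(quant_config.to_json_string())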
| 359 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so all we
        # check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
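
# Sketch of how run_trainer composes its launch command, with hypothetical
# paths shown only to make the list concatenation above concrete.
example_launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
example_script = ["examples/research_projects/wav2vec2/run_asr.py"]  # hypothetical path
example_args = ["--output_dir", "/tmp/out", "--fp16"]
example_ds_args = ["--deepspeed", "ds_config_wav2vec2_zero2.json"]  # hypothetical path
example_cmd = example_launcher + example_script + example_args + example_ds_args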
| 282 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = True
try:
AutoConfig.register("""custom""" , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# If remote code is not set, the default is to use local
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
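
# Minimal illustrative round-trip of the save/reload pattern exercised above:
# AutoFeatureExtractor resolves the class recorded in the saved
# preprocessor_config.json. The helper name is ours, not a library API.
def _roundtrip_feature_extractor():
    fe = Wav2Vec2FeatureExtractor()
    with tempfile.TemporaryDirectory() as tmp_dir:
        fe.save_pretrained(tmp_dir)
        reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert type(reloaded) is Wav2Vec2FeatureExtractor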
| 343 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
"""simple docstring"""
return True
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = LiltModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ )
UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ )
UpperCamelCase = torch.Size([1, 2, 768] )
UpperCamelCase = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
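
# Standalone sketch of the bbox "legalization" idea used in the model tester
# above, vectorized with torch min/max instead of the explicit swap loop; the
# helper name is ours, not part of the library.
def legalize_bboxes(bbox):
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)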
| 343 | 1 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # min-heapify the subtree rooted at idx
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
__UpperCamelCase : Optional[Any] = Node("R", -1)
__UpperCamelCase : Union[str, Any] = Node("B", 6)
__UpperCamelCase : List[str] = Node("A", 3)
__UpperCamelCase : Union[str, Any] = Node("X", 1)
__UpperCamelCase : int = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__UpperCamelCase : List[str] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
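
# Illustrative follow-up: repeatedly popping the min must yield values in
# ascending order, which also exercises remove()/sift_down after decrease_key.
drained = []
while not my_min_heap.is_empty():
    drained.append(my_min_heap.remove().val)
assert drained == sorted(drained)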
| 357 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
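
# Tiny illustrative check of the shard-size accounting used above:
# numel() times dtype_byte_size() gives a tensor's storage footprint in bytes.
def _example_weight_size():
    t = torch.zeros(1024, 1024, dtype=torch.float32)
    assert t.numel() * dtype_byte_size(t.dtype) == 4 * 1024 * 1024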
| 347 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a =logging.get_logger(__name__)
class A_ ( __lowercase ):
def __init__( self : Dict ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : List[str]):
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' ,UpperCAmelCase__ ,)
super().__init__(*UpperCAmelCase__ ,**UpperCAmelCase__)
| 73 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, replacing nested GenerationConfig values by plain dicts for JSON support."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
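
# Minimal usage sketch of the to_dict override above, assuming a writable
# output dir: the nested GenerationConfig is serialized to a plain dict so
# the whole argument set stays JSON-safe. The helper is illustrative only.
def _example_args_to_dict():
    args = Seq2SeqTrainingArguments(
        output_dir="/tmp/out", predict_with_generate=True, generation_config=GenerationConfig(num_beams=4)
    )
    assert isinstance(args.to_dict()["generation_config"], dict)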
| 4 | 0 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
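
    # Quick illustrative checks for the helpers above.
    assert is_arithmetic_series([2, 4, 6]) is True
    assert is_arithmetic_series([2, 4, 7]) is False
    assert mean([2, 4, 6]) == 4.0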
| 350 |
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
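
# Illustrative only: how such a pin table is typically consumed, e.g. by a
# setup helper that looks up pinned requirement strings by package name.
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]


assert deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]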
| 36 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    # the denoised sample for the previous timestep
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
@classmethod
def __snake_case ( cls : Optional[Any] , lowerCamelCase : int = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : str , ) -> Union[str, Any]:
__snake_case , __snake_case , __snake_case : Any = cls.load_config(
pretrained_model_name_or_path=_lowerCAmelCase , subfolder=_lowerCAmelCase , return_unused_kwargs=_lowerCAmelCase , return_commit_hash=_lowerCAmelCase , **_lowerCAmelCase , )
return cls.from_config(_lowerCAmelCase , return_unused_kwargs=_lowerCAmelCase , **_lowerCAmelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] = False , **lowerCamelCase : Optional[int] ) -> Optional[int]:
self.save_config(save_directory=_lowerCAmelCase , push_to_hub=_lowerCAmelCase , **_lowerCAmelCase )
@property
def __snake_case ( self : List[str] ) -> Dict:
return self._get_compatibles()
@classmethod
def __snake_case ( cls : Union[str, Any] ) -> Optional[Any]:
__snake_case : List[str] = list(set([cls.__name__] + cls._compatibles ) )
__snake_case : Optional[Any] = importlib.import_module(__name__.split("." )[0] )
__snake_case : Any = [
getattr(_lowerCAmelCase , _lowerCAmelCase ) for c in compatible_classes_str if hasattr(_lowerCAmelCase , _lowerCAmelCase )
]
return compatible_classes
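# A hedged usage sketch of the mixin. DDPMScheduler is one of the subclasses listed
# in the enum above; the checkpoint id and subfolder are illustrative assumptions:
# scheduler = DDPMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
# print([c.__name__ for c in scheduler.compatibles])  # other schedulers sharing this config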
| 123 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum of `arr` using Kadane's algorithm."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 158 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key: str, file: str) -> str:
    """Convert Megatron-DeepSpeed weight names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype: torch.dtype) -> float:
    """Return the size in bytes of one element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 356 |
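# An invocation sketch for the converter above. The script name and paths are
# placeholders, not taken from this file:
# python convert_bloom_original_checkpoint_to_pytorch.py \
#     --bloom_checkpoint_path /path/to/megatron_checkpoint \
#     --pytorch_dump_folder_path /path/to/output \
#     --shard_model \
#     --pretraining_tp 4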
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 | 0 |
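# A quick usage sketch of the backtracking solver above; the maze is
# illustrative (0 = open cell, 1 = blocked cell):
# example_maze = [
#     [0, 1, 0],
#     [0, 0, 0],
#     [1, 0, 0],
# ]
# solve_maze(example_maze)  # prints the 0/1 path matrix and returns True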
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
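# To exercise this suite locally, a sketch (the path assumes the usual
# transformers test layout, which is an assumption here):
# python -m pytest tests/models/distilbert/test_modeling_tf_distilbert.py -q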
| 46 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 0 |
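# For a concrete check, this O(n log n) divide-and-conquer version agrees with
# the O(n) Kadane implementation earlier in this document:
# nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# max_subarray(nums, 0, len(nums) - 1)  # -> (3, 6, 6): best span is nums[3:7], summing to 6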
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,) | 303 |
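# A hedged translation-prep sketch; the checkpoint id matches the pretrained
# maps above, everything else is illustrative:
# tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# batch = tok("Hello world", return_tensors="pt")
# the target language is selected at generation time, e.g. with
# forced_bos_token_id = tok.lang_code_to_id["fra_Latn"]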
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json)) | 303 | 1 |
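# A training sketch for the tokenizer class above; the corpus path and vocab
# size are placeholders:
# tok = SentencePieceUnigramTokenizer()
# tok.train("corpus.txt", vocab_size=8000)
# print(tok.encode("hello world").tokens)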
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused timm qkv projection into separate query/key/value tensors
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
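# An invocation sketch; the --swin_name value is the script's own default,
# while the script filename and output path are placeholders:
# python convert_swin_timm_to_pytorch.py \
#     --swin_name swin_tiny_patch4_window7_224 \
#     --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224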
| 265 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 347 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" | 368 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 73 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(SCREAMING_SNAKE_CASE_ )
class __A :
def __call__( self , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , **a__ , ):
if titles is None and texts is None:
return super().__call__(
a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
elif titles is None or texts is None:
_lowerCAmelCase : Union[str, Any] = titles if texts is None else texts
return super().__call__(
a__ , a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
_lowerCAmelCase : Union[str, Any] = titles if not isinstance(a__ , a__ ) else [titles]
_lowerCAmelCase : List[Any] = texts if not isinstance(a__ , a__ ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(a__ )
_lowerCAmelCase : Union[str, Any] = questions if not isinstance(a__ , a__ ) else [questions] * n_passages
assert len(a__ ) == len(
a__ ), F"There should be as many titles than texts but got {len(a__ )} titles and {len(a__ )} texts."
_lowerCAmelCase : str = super().__call__(a__ , a__ , padding=a__ , truncation=a__ )["""input_ids"""]
_lowerCAmelCase : str = super().__call__(a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ )["""input_ids"""]
_lowerCAmelCase : Dict = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(a__ , a__ )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : Optional[Any] = attention_mask
return self.pad(a__ , padding=a__ , max_length=a__ , return_tensors=a__ )
def __A ( self , a__ , a__ , a__ = 16 , a__ = 64 , a__ = 4 , ):
_lowerCAmelCase : Dict = reader_input["""input_ids"""]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = reader_output[:3]
_lowerCAmelCase : List[str] = len(a__ )
_lowerCAmelCase : Union[str, Any] = sorted(range(a__ ) , reverse=a__ , key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : List[str] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Tuple = len(a__ )
_lowerCAmelCase : int = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=a__ , top_spans=a__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=a__ , start_index=a__ , end_index=a__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(a__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __A ( self , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : List[str] = []
for start_index, start_score in enumerate(a__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Optional[Any] = sorted(a__ , key=lambda a__ : x[1] , reverse=a__ )
_lowerCAmelCase : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
_lowerCAmelCase : Union[str, Any] = end_index - start_index + 1
assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(a__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : int = ["input_ids", "attention_mask"]
_UpperCamelCase : Optional[int] = DPRReaderTokenizer
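# A minimal, self-contained sketch of the non-overlapping best-span search used
# above, for readability. The toy logits and the max_answer_length/top_spans
# values below are illustrative only, not taken from the original.
def best_spans_demo(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # skip spans that contain, or are contained in, an already chosen span
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans_demo([0.1, 2.0, 0.3, 0.0], [0.0, 0.5, 1.5, 0.2]))  # [(1, 2), (0, 1)]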
| 44 |
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = defaultdict(int )  # count occurrences of each "local" key
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : str = []
for duplicate_key in duplicates:
_lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
_lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
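# Quick, hypothetical check of the cleaning logic above, rewritten with plain
# names so it runs on its own. The toy doc list is invented for illustration,
# and this simplified version skips the conflicting-title error path.
def clean_doc_toc_demo(doc_list):
    counts = defaultdict(int)
    overview, rest = [], []
    for doc in doc_list:
        counts[doc["local"]] += 1
        (overview if doc["title"].lower() == "overview" else rest).append(doc)
    seen, deduped = set(), []
    for doc in rest:
        if counts[doc["local"]] == 1 or doc["local"] not in seen:
            deduped.append(doc)
            seen.add(doc["local"])
    return overview + sorted(deduped, key=lambda s: s["title"].lower())

demo_docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddpm", "title": "DDPM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},  # duplicate entry: kept once
]
print([d["local"] for d in clean_doc_toc_demo(demo_docs)])  # ['overview', 'ddim', 'ddpm']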
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : List[str] = content[api_idx]["sections"]
# Then to the scheduler doc
_lowerCAmelCase : Union[str, Any] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"]
_lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : List[Any] = True
if overwrite:
_lowerCAmelCase : Dict = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Tuple = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def A ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding="utf-8" ) as f:
_lowerCAmelCase : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : int = content[api_idx]["sections"]
# Then to the pipeline doc
_lowerCAmelCase : List[str] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"]
_lowerCAmelCase : Tuple = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase : List[Any] = pipeline_doc["section"]
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if overwrite:
_lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase : Dict = True
if overwrite:
_lowerCAmelCase : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase : Optional[int] = api_doc
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
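# For reference, a hypothetical _toctree.yml fragment with the shape the two
# checks above walk: an "API" entry containing "Schedulers" and "Pipelines"
# subsections. All titles and keys below are invented for illustration.
_demo_toc = yaml.safe_load(
    """
- title: API
  sections:
  - title: Schedulers
    sections:
    - {local: ddim, title: DDIM}
    - {local: ddpm, title: DDPM}
  - title: Pipelines
    sections:
    - {local: overview, title: Overview}
    - {local: stable_diffusion, title: Stable Diffusion}
"""
)
_api = next(entry for entry in _demo_toc if entry["title"] == "API")
print([section["title"] for section in _api["sections"]])  # ['Schedulers', 'Pipelines']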
| 36 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = []
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_init_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_train_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_train_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_epoch_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_epoch_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_step_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_step_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_evaluate" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_predict" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_save" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_log" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_prediction_step" )
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def UpperCamelCase__ ( self , lowercase_=0 , lowercase_=0 , lowercase_=64 , lowercase_=64 , lowercase_=None , lowercase_=False , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = RegressionDataset(length=lowercase_ )
UpperCAmelCase_ : List[str] = RegressionDataset(length=lowercase_ )
UpperCAmelCase_ : Optional[int] = RegressionModelConfig(a=lowercase_ , b=lowercase_ )
UpperCAmelCase_ : Optional[Any] = RegressionPreTrainedModel(lowercase_ )
UpperCAmelCase_ : Dict = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
UpperCAmelCase_ : Optional[int] = sorted(lowercase_ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
UpperCAmelCase_ : Union[str, Any] = sorted(lowercase_ , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
for cba, cbb in zip(lowercase_ , lowercase_ ):
if isinstance(cba , type ) and isinstance(cbb , type ):
self.assertEqual(cba , cbb )
elif isinstance(cba , type ) and not isinstance(cbb , type ):
self.assertEqual(cba , cbb.__class__ )
elif not isinstance(cba , type ) and isinstance(cbb , type ):
self.assertEqual(cba.__class__ , cbb )
else:
self.assertEqual(cba , cbb )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = ["on_init_end", "on_train_begin"]
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : str = len(trainer.get_eval_dataloader() )
UpperCAmelCase_ : str = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_trainer()
UpperCAmelCase_ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
UpperCAmelCase_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
UpperCAmelCase_ : Any = self.get_trainer(disable_tqdm=lowercase_ )
UpperCAmelCase_ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCAmelCase_ : List[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
UpperCAmelCase_ : Dict = self.get_trainer()
UpperCAmelCase_ : List[Any] = trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
UpperCAmelCase_ : List[Any] = self.get_trainer()
UpperCAmelCase_ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
UpperCAmelCase_ : Dict = self.get_trainer()
UpperCAmelCase_ : str = trainer.callback_handler.callbacks[0]
UpperCAmelCase_ : List[str] = trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=lowercase_ )
UpperCAmelCase_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCAmelCase_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
UpperCAmelCase_ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
UpperCAmelCase_ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
UpperCAmelCase_ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
UpperCAmelCase_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
UpperCAmelCase_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
UpperCAmelCase_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0] | 361 |
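# Illustrative trace of the expected-events bookkeeping used above, with
# made-up numbers: 2 epochs of 3 steps, logging every 2 steps, no evaluation
# or checkpointing. None of these values come from the original test.
demo_events, demo_step = ["on_init_end", "on_train_begin"], 0
for _ in range(2):  # epochs
    demo_events.append("on_epoch_begin")
    for _ in range(3):  # steps per epoch
        demo_step += 1
        demo_events += ["on_step_begin", "on_step_end"]
        if demo_step % 2 == 0:  # logging_steps == 2
            demo_events.append("on_log")
    demo_events.append("on_epoch_end")
demo_events += ["on_log", "on_train_end"]
print(demo_events)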
"""simple docstring"""
def sylvester( number ):
assert isinstance(number , int ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
msg = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(msg )
else:
num = sylvester(number - 1 )
lower = num - 1
upper = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCamelCase__ : Optional[Any] = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Path , lowerCAmelCase__ : Union[str, None] = None , lowerCAmelCase__ : Union[List[str], None] = None , lowerCAmelCase__ : Union[str, List[str], None] = None , lowerCAmelCase__ : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = [file for file in os.listdir(lowerCAmelCase__ ) if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )]
if identifier is not None:
__SCREAMING_SNAKE_CASE : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for n_ in n_identifier:
__SCREAMING_SNAKE_CASE : Any = [file for file in files if n_ not in file]
else:
__SCREAMING_SNAKE_CASE : Any = [file for file in files if n_identifier not in file]
__SCREAMING_SNAKE_CASE : Any = ignore_files or []
ignore_files.append("""__init__.py""" )
__SCREAMING_SNAKE_CASE : Dict = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCAmelCase__ )
if only_modules:
__SCREAMING_SNAKE_CASE : Optional[int] = file.split(""".""" )[0]
try:
__SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = doctest.DocTestSuite(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = unittest.TextTestRunner().run(lowerCAmelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"{module_identifier} is not a module." )
else:
__SCREAMING_SNAKE_CASE : int = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = Path("""src/transformers""" )
__SCREAMING_SNAKE_CASE : int = "modeling"
__SCREAMING_SNAKE_CASE : Optional[int] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ , ignore_files=lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = Path("""src/transformers""" )
__SCREAMING_SNAKE_CASE : List[Any] = "tokenization"
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = Path("""src/transformers""" )
__SCREAMING_SNAKE_CASE : Optional[int] = "configuration"
self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = Path("""src/transformers""" )
__SCREAMING_SNAKE_CASE : Dict = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowerCAmelCase__ , n_identifier=lowerCAmelCase__ )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = Path("""docs/source""" )
__SCREAMING_SNAKE_CASE : List[str] = ["favicon.ico"]
self.analyze_directory(lowerCAmelCase__ , ignore_files=lowerCAmelCase__ , only_modules=lowerCAmelCase__ ) | 112 |
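# Minimal, self-contained illustration of the doctest machinery the test
# above relies on; the throwaway module and its docstring are invented for
# the example.
import types as _types

_demo_mod = _types.ModuleType("demo")
_demo_mod.__doc__ = """
>>> 1 + 1
2
"""
_suite = doctest.DocTestSuite(_demo_mod)
_result = unittest.TextTestRunner(verbosity=0).run(_suite)
assert len(_result.failures) == 0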
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
])
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :List[Any] ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=a , )
assert hasattr(self , "env" )
def _lowerCamelCase ( self :Any , a :Optional[Any] ) -> Dict:
__UpperCamelCase : str = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__UpperCamelCase : Optional[int] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=a , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=a , py_version="py36" , )
def _lowerCamelCase ( self :Dict , a :Dict ) -> Optional[int]:
TrainingJobAnalytics(a ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def _lowerCamelCase ( self :Dict , a :Tuple ) -> List[Any]:
# create estimator
__UpperCamelCase : int = self.create_estimator(a )
# run training
estimator.fit()
# result dataframe
__UpperCamelCase : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__UpperCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__UpperCamelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__UpperCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump test results into a json file to share in the PR
with open(f'{estimator.latest_training_job.name}.json' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a ) | 232 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( _a ):
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase , """width_multiplier""" ) )
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase="swish" , __lowerCAmelCase=3 , __lowerCAmelCase=32 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=None , __lowerCAmelCase=0.25 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = make_divisible(512 * width_multiplier , divisor=8 )
UpperCamelCase__ = hidden_act
UpperCamelCase__ = conv_kernel_size
UpperCamelCase__ = output_stride
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = use_labels
UpperCamelCase__ = is_training
UpperCamelCase__ = num_labels
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
UpperCamelCase__ = width_multiplier
UpperCamelCase__ = ffn_dropout
UpperCamelCase__ = attn_dropout
def _lowerCamelCase ( self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = MobileViTVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MobileViTVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MobileViTVaForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
snake_case : str = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case : Dict = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case : str = False
snake_case : Optional[Any] = False
snake_case : Dict = False
snake_case : int = False
def _lowerCamelCase ( self ):
UpperCamelCase__ = MobileViTVaModelTester(self )
UpperCamelCase__ = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
UpperCamelCase__ = outputs.hidden_states
UpperCamelCase__ = 5
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase__ = 2
for i in range(len(__lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = MobileViTVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _UpperCamelCase ():
"""simple docstring"""
UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowerCAmelCase )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
# verify the logits
UpperCamelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
UpperCamelCase__ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCamelCase__ = model.to(__lowerCAmelCase )
UpperCamelCase__ = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowerCAmelCase )
UpperCamelCase__ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCamelCase__ = model.to(__lowerCAmelCase )
UpperCamelCase__ = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
UpperCamelCase__ = outputs.logits.detach().cpu()
UpperCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] )
UpperCamelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
UpperCamelCase__ = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
UpperCamelCase__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
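# The hidden-state assertions above encode simple geometry: five stages, each
# halving the spatial resolution starting at /2. With the tester defaults of
# image_size=64 and output_stride=32:
demo_image_size, demo_divisor, demo_shapes = 64, 2, []
for _ in range(5):
    demo_shapes.append((demo_image_size // demo_divisor, demo_image_size // demo_divisor))
    demo_divisor *= 2
print(demo_shapes)  # [(32, 32), (16, 16), (8, 8), (4, 4), (2, 2)]
assert demo_divisor // 2 == 32  # matches output_stride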
| 358 |
def _UpperCamelCase (a__ :dict ):
"""simple docstring"""
UpperCamelCase__ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCamelCase__ = set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def _UpperCamelCase (a__ :dict , a__ :int , a__ :set , a__ :set ):
"""simple docstring"""
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
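# Usage sketch for the cycle check above, rewritten with plain names so it
# runs on its own (the snippet's mangled identifiers shadow each other). The
# intended API is check_cycle(graph) driving
# depth_first_search(graph, vertex, visited, rec_stk).
def check_cycle_demo(graph):
    visited, rec_stk = set(), set()
    return any(n not in visited and dfs_demo(graph, n, visited, rec_stk) for n in graph)

def dfs_demo(graph, vertex, visited, rec_stk):
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if dfs_demo(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    rec_stk.remove(vertex)
    return False

assert check_cycle_demo({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert check_cycle_demo({0: [1], 1: [2], 2: []}) is False   # acyclic chain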
| 87 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase_ = logging.getLogger(__name__)
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = False
def UpperCAmelCase__ ( self : List[Any] , _A : Optional[int] , _A : Optional[Any] , _A : Any , _A : str ):
"""simple docstring"""
if not self.initialized:
__SCREAMING_SNAKE_CASE : Union[str, Any] = RagRetriever(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__SCREAMING_SNAKE_CASE : Optional[int] = True
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.retriever.index.init_index()
def UpperCAmelCase__ ( self : str , _A : str , _A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.retriever._main_retrieve(_A , _A )
return doc_ids, retrieved_doc_embeds
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _A : Tuple , _A : str , _A : List[str] , _A : Optional[int] , _A : List[str]=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(_A ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , )
__SCREAMING_SNAKE_CASE : List[Any] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_A , _A , _A , _A )
for worker in self.retrieval_workers
] )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : Tuple ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = ray.get(random_worker.retrieve.remote(_A , _A ) )
else:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self._main_retrieve(_A , _A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A )
@classmethod
def UpperCAmelCase__ ( cls : int , _A : Dict , _A : str=None , **_A : Optional[Any] ):
"""simple docstring"""
return super(_A , cls ).get_tokenizers(_A , _A , **_A )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , _A : Any , _A : Optional[Any] , _A : Optional[Any]=None , **_A : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''config''' , _A ) or RagConfig.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = RagTokenizer.from_pretrained(_A , config=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rag_tokenizer.question_encoder
__SCREAMING_SNAKE_CASE : Any = rag_tokenizer.generator
if indexed_dataset is not None:
__SCREAMING_SNAKE_CASE : Dict = '''custom'''
__SCREAMING_SNAKE_CASE : Optional[int] = CustomHFIndex(config.retrieval_vector_size , _A )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = cls._build_index(_A )
return cls(
_A , question_encoder_tokenizer=_A , generator_tokenizer=_A , retrieval_workers=_A , index=_A , )
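# Stripped of the Ray actor plumbing, the retrieval dispatch above is just
# "pick one worker at random per query". A plain-Python stand-in (the worker
# class and its return value are invented for illustration):
class _FakeWorker:
    def __init__(self, name):
        self.name = name

    def retrieve(self, question_hidden_states, n_docs):
        return f"{self.name} retrieved {n_docs} docs"

_workers = [_FakeWorker(f"worker-{i}") for i in range(3)]
_worker = _workers[random.randint(0, len(_workers) - 1)]
print(_worker.retrieve(question_hidden_states=None, n_docs=5))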
| 303 |
import os
import sys
lowercase_ = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowercase_ = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoConfig.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoTokenizer.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModel.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModel.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def a__ ( *snake_case , **snake_case ):
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*snake_case , **snake_case )
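# The wrappers above forward straight to the corresponding Auto* classes, so
# direct usage is equivalent. The checkpoint name below is illustrative and
# the calls need network access or a local cache.
demo_config = AutoConfig.from_pretrained("bert-base-uncased")
demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print(demo_config.model_type, demo_tokenizer("hello world")["input_ids"][:3])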
| 303 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
SCREAMING_SNAKE_CASE_ : int = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_ : List[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowerCamelCase )}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
UpperCAmelCase = field(
default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
}, )
def UpperCamelCase ( self: str ):
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class a :
"""simple docstring"""
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(default=_lowerCamelCase, metadata={"help": "The input training data file (a text file)."} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
UpperCAmelCase = field(
default=5, metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
}, )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=0.1_5, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
UpperCAmelCase = field(
default=_lowerCamelCase, metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
}, )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
if self.train_file is not None:
A__ = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A__ = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ):
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" ) as f:
A__ = [json.loads(UpperCAmelCase_ ) for line in f.read().splitlines() if (len(UpperCAmelCase_ ) > 0 and not line.isspace())]
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
A__ = {c: dataset[c] for c in dataset.column_names}
A__ = refs
return Dataset.from_dict(UpperCAmelCase_ )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
A__ = """text"""
A__ = load_dataset(UpperCAmelCase_ , data_files=UpperCAmelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
A__ = AutoConfig.from_pretrained(model_args.config_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
A__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A__ = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCAmelCase_ )
elif model_args.model_name_or_path:
A__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase_ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
A__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
A__ = AutoModelForMaskedLM.from_config(UpperCAmelCase_ )
model.resize_token_embeddings(len(UpperCAmelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A__ = datasets["""train"""].column_names
else:
A__ = datasets["""validation"""].column_names
A__ = """text""" if """text""" in column_names else column_names[0]
A__ = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(UpperCAmelCase_ : Dict ):
# Remove empty lines
A__ = [line for line in examples["""text"""] if len(UpperCAmelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=data_args.max_seq_length )
A__ = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A__ = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A__ = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, make sure the trainer does not remove those columns
A__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A__ = False
# Data collator
# This one will take care of randomly masking the tokens.
A__ = DataCollatorForWholeWordMask(tokenizer=UpperCAmelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A__ = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A__ = model_args.model_name_or_path
else:
A__ = None
A__ = trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase_ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ = trainer.evaluate()
A__ = math.exp(eval_output["""eval_loss"""] )
A__ = perplexity
A__ = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _snake_case ( UpperCAmelCase_ : Any ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
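# The heart of this script is the whole-word-masking collator; a minimal,
# hedged sketch of wiring it up on its own. The checkpoint name is
# illustrative and the calls need network access or a local cache.
demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
demo_collator = DataCollatorForWholeWordMask(tokenizer=demo_tokenizer, mlm_probability=0.15)
demo_batch = demo_collator([demo_tokenizer("whole word masking masks every piece of a word")])
print(demo_batch["input_ids"].shape)  # (1, sequence_length)
print((demo_batch["labels"] != -100).sum())  # positions selected for masking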
| 69 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
    def get_image_processor(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        """simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
    def test_image_processor(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    @require_torch
    def test_post_process_masks(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [torch.ones((1, 3, 5, 5) )]
        original_sizes = [[17_64, 26_46]]
        reshaped_input_size = [[6_83, 10_24]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        masks = processor.post_process_masks(
            dummy_masks , torch.tensor(original_sizes ) , torch.tensor(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError ):
            masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
@require_vision
@require_tf
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
    def get_image_processor(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self ):
        """simple docstring"""
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
    def test_image_processor(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    @require_tf
    def test_post_process_masks(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [tf.ones((1, 3, 5, 5) )]
        original_sizes = [[17_64, 26_46]]
        reshaped_input_size = [[6_83, 10_24]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        masks = processor.post_process_masks(
            dummy_masks , tf.convert_to_tensor(original_sizes ) , tf.convert_to_tensor(reshaped_input_size ) , return_tensors="""tf""" , )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(
            dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            masks = processor.post_process_masks(
                dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
    def get_image_processor(self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]
        original_sizes = [[17_64, 26_46]]
        reshaped_input_size = [[6_83, 10_24]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks , original_sizes , reshaped_input_size , return_tensors="""tf""" )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks , original_sizes , reshaped_input_size , return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input , return_tensors="""pt""" )["""pixel_values"""].numpy()
        pt_input_processor = processor(images=image_input , return_tensors="""pt""" )["""pixel_values"""].numpy()
        tf_input_feat_extract = image_processor(image_input , return_tensors="""tf""" )["""pixel_values"""].numpy()
        tf_input_processor = processor(images=image_input , return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract , pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_feat_extract ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_processor ) )
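def _mask_upscaling_demo():
    # Hedged sketch (added for illustration): post_process_masks is exercised
    # above only through its shape contract -- low-resolution masks in, masks
    # at the original image size out. A plain bilinear upscale reproduces that
    # contract; it is a simplification, not the library's exact
    # resize-then-crop pipeline.
    import torch
    import torch.nn.functional as F

    low_res = torch.ones((1, 3, 5, 5))
    upscaled = F.interpolate(low_res, size=(1764, 2646), mode="bilinear", align_corners=False)
    assert upscaled.shape == (1, 3, 1764, 2646)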
| 69 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ,)
lowerCAmelCase : int = field(
default=1_0_2_4 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase : bool = field(
default=A_ ,metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[int] = field(
default=A_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} ,)
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "A csv or a json file containing the training data."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "A csv or a json file containing the validation data."} )
lowerCAmelCase : Optional[str] = field(default=A_ ,metadata={"help": "A csv or a json file containing the test data."} )
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowercase__ : List[str] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowercase__ : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
default=A_ ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
lowerCAmelCase : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
def __UpperCAmelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase__ : Tuple = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase__ : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase__ : str = data_args.train_file.split('''.''' )[-1]
lowercase__ : Tuple = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase__ : Dict = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowercase__ : Union[str, Any] = load_dataset('''csv''' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase__ : Optional[Any] = load_dataset('''json''' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase__ : int = raw_datasets['''train'''].features['''label'''].names
lowercase__ : List[Any] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowercase__ : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCamelCase , )
lowercase__ : Any = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ : str = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase__ : Any = {'''Refused''': 0, '''Entailed''': 1}
lowercase__ : str = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowercase__ : str = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__lowerCamelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__lowerCamelCase ):
lowercase__ : Dict = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowercase__ : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowercase__ : Tuple = examples['''statement''']
lowercase__ : str = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowercase__ : Dict = tokenizer(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase )
lowercase__ : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowercase__ : List[Any] = raw_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowercase__ : str = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowercase__ : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowercase__ : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowercase__ : Optional[int] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowercase__ : Optional[Any] = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowercase__ : str = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCamelCase ):
lowercase__ : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions
lowercase__ : Dict = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ : List[str] = default_data_collator
    elif training_args.fp16:
lowercase__ : Any = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 )
else:
lowercase__ : List[Any] = None
# Initialize our Trainer
lowercase__ : Union[str, Any] = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
lowercase__ : Dict = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : int = last_checkpoint
lowercase__ : List[str] = trainer.train(resume_from_checkpoint=__lowerCamelCase )
lowercase__ : List[str] = train_result.metrics
lowercase__ : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
lowercase__ : Any = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , __lowerCamelCase )
trainer.save_metrics('''train''' , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Union[str, Any] = trainer.evaluate(eval_dataset=__lowerCamelCase )
lowercase__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
lowercase__ : Tuple = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics('''eval''' , __lowerCamelCase )
trainer.save_metrics('''eval''' , __lowerCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowercase__ : Tuple = predict_dataset.remove_columns('''label''' )
lowercase__ : str = trainer.predict(__lowerCamelCase , metric_key_prefix='''predict''' ).predictions
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=1 )
lowercase__ : List[Any] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(__lowerCamelCase ):
lowercase__ : Optional[Any] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
lowercase__ : Dict = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
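def _table_text_demo():
    # Hedged sketch (added for illustration) of the table_text format consumed
    # by _convert_table_text_to_pandas above: rows are newline-separated,
    # cells are '#'-separated, and the first row is the header. The sample
    # string below is invented for the example.
    import pandas as pd

    table_text = "col1#col2\na#1\nb#2\n"
    rows = [r.split("#") for r in table_text.strip("\n").split("\n")]
    df = pd.DataFrame.from_records(rows[1:], columns=rows[0])
    assert list(df.columns) == ["col1", "col2"] and len(df) == 2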
if __name__ == "__main__":
main()
| 16 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2_0_4_8 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs ,):
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images ,return_tensors=return_tensors ,max_patches=max_patches ,**kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images ,return_tensors=return_tensors ,max_patches=max_patches ,header_text=text ,**kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text ,add_special_tokens=add_special_tokens ,padding=padding ,truncation=truncation ,max_length=max_length ,stride=stride ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,return_overflowing_tokens=return_overflowing_tokens ,return_special_tokens_mask=return_special_tokens_mask ,return_offsets_mapping=return_offsets_mapping ,return_token_type_ids=return_token_type_ids ,return_length=return_length ,verbose=verbose ,return_tensors=return_tensors ,**kwargs ,)
            if "attention_mask" in text_encoding:
                text_encoding['decoder_attention_mask'] = text_encoding.pop('attention_mask')
            if "input_ids" in text_encoding:
                text_encoding['decoder_input_ids'] = text_encoding.pop('input_ids')
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode( self ,*args ,**kwargs):
        return self.tokenizer.batch_decode(*args ,**kwargs)
    def decode( self ,*args ,**kwargs):
        return self.tokenizer.decode(*args ,**kwargs)
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
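def _dedup_preserving_order_demo():
    # Hedged sketch (added for illustration): the model_input_names property
    # above merges the tokenizer's and image processor's input names while
    # keeping order and dropping duplicates. This is the standard
    # dict.fromkeys idiom it relies on; the names below are illustrative.
    a = ["input_ids", "attention_mask"]
    b = ["flattened_patches", "attention_mask"]
    merged = list(dict.fromkeys(a + b))
    assert merged == ["input_ids", "attention_mask", "flattened_patches"]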
| 73 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 137 |
from ....utils import logging
_A = logging.get_logger(__name__)
class lowerCamelCase ( object ):
    def __init__(self , config , num_labels=None , modal_hidden_size=2_0_4_8 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
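def _shared_dict_demo():
    # Hedged sketch (added for illustration) of the attribute-sharing trick
    # used above: `self.__dict__ = config.__dict__` makes the wrapper share
    # the wrapped object's attribute store, so reads fall through to it and
    # writes become visible on it. The classes here are invented for the demo.
    class Inner:
        def __init__(self):
            self.hidden_size = 768

    class Wrapper:
        def __init__(self, inner):
            self.__dict__ = inner.__dict__
            self.modal_hidden_size = 2048

    inner = Inner()
    wrapper = Wrapper(inner)
    assert wrapper.hidden_size == 768 and inner.modal_hidden_size == 2048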
| 137 | 1 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction ( enum.Enum ):
    """simple docstring"""
    UP = 0
    DOWN = 1
def forceWrite( content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor( content , color , end="" ):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m" , end )
def reset_cursor():
    forceWrite("""\r""" )
def move_cursor( num_lines , direction ):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def clear_line():
    forceWrite(""" """ * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH )
| 35 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase__: Union[str, Any] = "examples/"
UpperCamelCase__: Optional[Any] = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
UpperCamelCase__: Optional[int] = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
UpperCamelCase__: List[Any] = "README.md"
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ) -> Optional[int]:
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase : Optional[int] = f.read()
UpperCAmelCase , UpperCAmelCase : List[Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase : List[Any] = replace.replace('''VERSION''' , _lowerCAmelCase )
UpperCAmelCase : Optional[Any] = re_pattern.sub(_lowerCAmelCase , _lowerCAmelCase )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Any ) -> Optional[int]:
for folder, directories, fnames in os.walk(_lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase , pattern='''examples''' )
def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not patch:
update_version_in_examples(_lowerCAmelCase )
def snake_case_ ( ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase : Optional[int] = '''1. Want to contribute a new model?'''
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase : Optional[Any] = f.readlines()
# Find the start of the list.
UpperCAmelCase : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase : Optional[int] = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , )
index += 1
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_lowerCAmelCase )
def snake_case_ ( ) -> Optional[Any]:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase : Union[str, Any] = f.read()
UpperCAmelCase : int = REPLACE_PATTERNS['''init'''][0].search(_lowerCAmelCase ).groups()[0]
return packaging.version.parse(_lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[str]=False ) -> Any:
UpperCAmelCase : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase : Optional[int] = default_version.base_version
elif patch:
UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase : Dict = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_lowerCAmelCase ) == 0:
UpperCAmelCase : Tuple = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCAmelCase , patch=_lowerCAmelCase )
def snake_case_ ( ) -> Any:
UpperCAmelCase : List[Any] = get_version()
UpperCAmelCase : List[str] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase : List[Any] = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase : Optional[int] = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_lowerCAmelCase ) == 0:
UpperCAmelCase : Dict = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
UpperCamelCase__: Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
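def _version_regex_demo():
    # Hedged sketch (added for illustration) of how the "init" pattern above
    # rewrites a version string. The sample module text is invented.
    re_pattern, replace = REPLACE_PATTERNS["init"]
    sample = '__version__ = "0.9.0.dev0"\n'
    rewritten = re_pattern.sub(replace.replace("VERSION", "0.9.0"), sample)
    assert rewritten == '__version__ = "0.9.0"\n'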
| 23 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCamelCase_ = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=lowerCAmelCase__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCAmelCase__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCAmelCase__ , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=lowerCAmelCase__ , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=lowerCAmelCase__ , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=lowerCAmelCase__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCAmelCase__ , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCAmelCase__ , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=lowerCAmelCase__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=lowerCAmelCase__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCAmelCase__ , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCAmelCase__ , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCAmelCase__ , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=lowerCAmelCase__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=lowerCAmelCase__ , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=lowerCAmelCase__ , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=lowerCAmelCase__ , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCAmelCase__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=lowerCAmelCase__ , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCAmelCase__ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCAmelCase__ , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCAmelCase__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCAmelCase__ , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=lowerCAmelCase__ , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowerCAmelCase__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=lowerCAmelCase__ , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=lowerCAmelCase__ , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=lowerCAmelCase__ , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=lowerCAmelCase__ , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=lowerCAmelCase__ , default=4_000 , help="""Checkpoint interval.""" )
UpperCamelCase__ = parser.parse_args()
    sanity_checks(args )
# ARGS #
init_gpu_params(lowerCAmelCase__ )
set_seed(lowerCAmelCase__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"
                    """ it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(lowerCAmelCase__ ) , lowerCAmelCase__ , indent=4 )
git_log(args.dump_path )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCamelCase__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCamelCase__ = tokenizer.all_special_tokens.index(lowerCAmelCase__ )
UpperCamelCase__ = tokenizer.all_special_ids[idx]
logger.info(f"Special tokens {special_tok_ids}" )
UpperCamelCase__ = special_tok_ids
UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , """rb""" ) as fp:
UpperCamelCase__ = pickle.load(lowerCAmelCase__ )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , """rb""" ) as fp:
UpperCamelCase__ = pickle.load(lowerCAmelCase__ )
UpperCamelCase__ = np.maximum(lowerCAmelCase__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase__ = 0.0 # do not predict special tokens
UpperCamelCase__ = torch.from_numpy(lowerCAmelCase__ )
else:
UpperCamelCase__ = None
UpperCamelCase__ = LmSeqsDataset(params=lowerCAmelCase__ , data=lowerCAmelCase__ )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
UpperCamelCase__ = student_config_class.from_pretrained(args.student_config )
UpperCamelCase__ = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCAmelCase__ )
else:
UpperCamelCase__ = student_model_class(lowerCAmelCase__ )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCAmelCase__ )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase__ = Distiller(
params=lowerCAmelCase__ , dataset=lowerCAmelCase__ , token_probs=lowerCAmelCase__ , student=lowerCAmelCase__ , teacher=lowerCAmelCase__ )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
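def _mlm_smoothing_demo():
    # Hedged sketch (added for illustration) of the token-weighting step in
    # main() above: raising counts to a negative power (mlm_smoothing, 0.7 by
    # default) flattens the frequency distribution so rare tokens are masked
    # more often. The counts below are invented.
    import numpy as np

    counts = np.array([1000, 100, 10, 0])
    probs = np.maximum(counts, 1) ** -0.7
    assert probs[2] > probs[0]  # rarer tokens get a higher mask weight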
| 360 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
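# Note (added for illustration): binding sys.modules[__name__] to a
# _LazyModule defers the torch-backed imports declared above until an
# attribute such as `GitModel` is first accessed, which keeps importing the
# package cheap when those models are never used.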
| 178 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_nllb"""] = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_nllb_fast"""] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int , remainder: int , digits: list[int] , length: int ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1 , max_power + 1):
        result += reversible_numbers(length , 0 , [0] * length , length)
    return result
if __name__ == "__main__":
    print(f"{solution() = }")
| 87 | 0 |
'''simple docstring'''
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
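    # Worked check (added for illustration): the sets above share
    # {"c", "d", "e"} (3 elements) and their union has 8 elements,
    # so this prints 3 / 8 = 0.375.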
| 21 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __magic_name__ ( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
lowercase_ : Union[str, Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoConfig.from_pretrained("""gpt2""" )
lowercase_ : List[Any] = GenerationConfig.from_model_config(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = GenerationConfig()
lowercase_ : int = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
lowercase_ : List[str] = copy.deepcopy(lowercase_ )
lowercase_ : Tuple = generation_config.update(**lowercase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {"""foo""": """bar"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Dict = GenerationConfig()
lowercase_ : int = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = GenerationConfig.from_pretrained(lowercase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowercase_ : List[str] = GenerationConfig.from_model_config(lowercase_ )
assert not hasattr(lowercase_ , """foo""" ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowercase_ )
self.assertEqual(default_config.num_beams , 1 )
lowercase_ : Dict = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowercase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowercase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any ):
lowercase_ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowercase_ : List[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""test-generation-config""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowercase_ : Optional[Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase_ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
| 21 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase = pytest.mark.integration
@require_faiss
class UpperCamelCase ( lowerCAmelCase__ ):
def a_ ( self) -> Optional[Any]:
snake_case_ = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCAmelCase__) for x in np.arange(30).tolist()]})
return dset
def a_ ( self) -> Union[str, Any]:
import faiss
snake_case_ = self._create_dummy_dataset()
snake_case_ = dset.map(
lambda lowerCAmelCase__, lowerCAmelCase__: {"vecs": i * np.ones(5, dtype=np.floataa)}, with_indices=lowerCAmelCase__, keep_in_memory=lowerCAmelCase__)
snake_case_ = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
snake_case_ , snake_case_ = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.floataa))
self.assertEqual(examples['filename'][0], 'my_name-train_29')
dset.drop_index('vecs')
def a_ ( self) -> str:
import faiss
snake_case_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
snake_case_ , snake_case_ = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.floataa))
self.assertEqual(examples['filename'][0], 'my_name-train_29')
def a_ ( self) -> Optional[Any]:
import faiss
snake_case_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs', metric_type=faiss.METRIC_INNER_PRODUCT, )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__) as tmp_file:
dset.save_faiss_index('vecs', tmp_file.name)
dset.load_faiss_index('vecs2', tmp_file.name)
os.unlink(tmp_file.name)
snake_case_ , snake_case_ = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.floataa))
self.assertEqual(examples['filename'][0], 'my_name-train_29')
def a_ ( self) -> List[str]:
snake_case_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowerCAmelCase__, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.floataa)))
def a_ ( self) -> str:
from elasticsearch import Elasticsearch
snake_case_ = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
snake_case_ = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30)
snake_case_ = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
snake_case_ = Elasticsearch()
dset.add_elasticsearch_index('filename', es_client=lowerCAmelCase__)
snake_case_ , snake_case_ = dset.get_nearest_examples('filename', 'my_name-train_29')
self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class UpperCamelCase ( lowerCAmelCase__ ):
def a_ ( self) -> Tuple:
import faiss
snake_case_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5, dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal, 5)
index.add_vectors(np.zeros((5, 5), dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal, 10)
# single query
snake_case_ = np.zeros(5, dtype=np.floataa)
snake_case_ = 1
snake_case_ , snake_case_ = index.search(lowerCAmelCase__)
self.assertRaises(lowerCAmelCase__, index.search, query.reshape(-1, 1))
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
# batched queries
snake_case_ = np.eye(5, dtype=np.floataa)[::-1]
snake_case_ , snake_case_ = index.search_batch(lowerCAmelCase__)
self.assertRaises(lowerCAmelCase__, index.search_batch, queries[0])
snake_case_ = [scores[0] for scores in total_scores]
snake_case_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__), 0)
self.assertListEqual([4, 3, 2, 1, 0], lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
import faiss
snake_case_ = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5, dtype=np.floataa))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
snake_case_ = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5, dtype=np.floataa))
self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
with self.assertRaises(lowerCAmelCase__):
snake_case_ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))
def a_ ( self) -> List[str]:
import faiss
snake_case_ = faiss.IndexFlat(5)
snake_case_ = FaissIndex(custom_index=lowerCAmelCase__)
index.add_vectors(np.eye(5, dtype=np.floataa))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def a_ ( self) -> Any:
import faiss
snake_case_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__) as tmp_file:
index.save(tmp_file.name)
snake_case_ = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
snake_case_ = np.zeros(5, dtype=np.floataa)
snake_case_ = 1
snake_case_ , snake_case_ = index.search(lowerCAmelCase__)
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
@require_faiss
def UpperCAmelCase ( UpperCAmelCase ) -> str:
import faiss
snake_case_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
snake_case_ = 'index.faiss'
snake_case_ = f'mock://{index_name}'
index.save(UpperCAmelCase , storage_options=mockfs.storage_options )
snake_case_ = FaissIndex.load(UpperCAmelCase , storage_options=mockfs.storage_options )
snake_case_ = np.zeros(5 , dtype=np.floataa )
snake_case_ = 1
snake_case_ , snake_case_ = index.search(UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase ( lowerCAmelCase__ ):
def a_ ( self) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
snake_case_ = Elasticsearch()
snake_case_ = {'acknowledged': True}
snake_case_ = ElasticSearchIndex(es_client=lowerCAmelCase__)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
snake_case_ = 'foo'
snake_case_ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
snake_case_ , snake_case_ = index.search(lowerCAmelCase__)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# single query with timeout
snake_case_ = 'foo'
snake_case_ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
snake_case_ , snake_case_ = index.search(lowerCAmelCase__, request_timeout=30)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# batched queries
snake_case_ = ['foo', 'bar', 'foobar']
snake_case_ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
snake_case_ , snake_case_ = index.search_batch(lowerCAmelCase__)
snake_case_ = [scores[0] for scores in total_scores]
snake_case_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__), 0)
self.assertListEqual([1, 1, 1], lowerCAmelCase__)
# batched queries with timeout
snake_case_ = ['foo', 'bar', 'foobar']
snake_case_ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
snake_case_ , snake_case_ = index.search_batch(lowerCAmelCase__, request_timeout=30)
snake_case_ = [scores[0] for scores in total_scores]
snake_case_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__), 0)
self.assertListEqual([1, 1, 1], lowerCAmelCase__)
| 69 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Optional[int]:
snake_case_ = data
snake_case_ = None
class UpperCamelCase :
def __init__( self) -> Dict:
snake_case_ = None
snake_case_ = None
def __iter__( self) -> Iterator[Any]:
snake_case_ = self.head
while self.head:
yield node.data
snake_case_ = node.next
if node == self.head:
break
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join(str(lowerCAmelCase__) for item in iter(self))
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(len(self), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(0, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
if index < 0 or index > len(self):
raise IndexError('list index out of range.')
snake_case_ = Node(lowerCAmelCase__)
if self.head is None:
snake_case_ = new_node # first node points itself
snake_case_ = snake_case_ = new_node
elif index == 0: # insert at head
snake_case_ = self.head
snake_case_ = snake_case_ = new_node
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = new_node
if index == len(self) - 1: # insert at tail
snake_case_ = new_node
def a_ ( self) -> str:
return self.delete_nth(0)
def a_ ( self) -> Any:
return self.delete_nth(len(self) - 1)
def a_ ( self, lowerCAmelCase__ = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError('list index out of range.')
snake_case_ = self.head
if self.head == self.tail: # just one node
snake_case_ = snake_case_ = None
elif index == 0: # delete head node
snake_case_ = self.tail.next.next
snake_case_ = self.head.next
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = temp.next.next
if index == len(self) - 1: # delete at tail
snake_case_ = temp
return delete_node.data
def a_ ( self) -> bool:
return len(self) == 0
def UpperCAmelCase ( ) -> None:
snake_case_ = CircularLinkedList()
assert len(UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCAmelCase ) == i
circular_linked_list.insert_nth(UpperCAmelCase , i + 1 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 1 |
'''simple docstring'''
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
return int(input_a == input_a == 0 )
def _A ( ):
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 48 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """codegen"""
lowerCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any]=5_0_4_0_0 , _lowerCAmelCase : Tuple=2_0_4_8 , _lowerCAmelCase : Dict=2_0_4_8 , _lowerCAmelCase : Tuple=4_0_9_6 , _lowerCAmelCase : Any=2_8 , _lowerCAmelCase : Optional[int]=1_6 , _lowerCAmelCase : int=6_4 , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[int]=1e-5 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : int=True , _lowerCAmelCase : str=5_0_2_5_6 , _lowerCAmelCase : Any=5_0_2_5_6 , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Dict , ):
'''simple docstring'''
__lowercase =vocab_size
__lowercase =n_ctx
__lowercase =n_positions
__lowercase =n_embd
__lowercase =n_layer
__lowercase =n_head
__lowercase =n_inner
__lowercase =rotary_dim
__lowercase =activation_function
__lowercase =resid_pdrop
__lowercase =embd_pdrop
__lowercase =attn_pdrop
__lowercase =layer_norm_epsilon
__lowercase =initializer_range
__lowercase =use_cache
__lowercase =bos_token_id
__lowercase =eos_token_id
super().__init__(
bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , **_lowerCAmelCase)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : PretrainedConfig , _lowerCAmelCase : str = "default" , _lowerCAmelCase : List[PatchingSpec] = None , _lowerCAmelCase : bool = False , ):
'''simple docstring'''
super().__init__(_lowerCAmelCase , task=_lowerCAmelCase , patching_specs=_lowerCAmelCase , use_past=_lowerCAmelCase)
if not getattr(self._config , 'pad_token_id' , _lowerCAmelCase):
# TODO: how to do that better?
__lowercase =0
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
__lowercase ={0: 'batch', 1: 'past_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
return self._config.n_layer
@property
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._config.n_head
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =super(_lowerCAmelCase , self).generate_dummy_inputs(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
# We need to order the input in the way they appears in the forward()
__lowercase =OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase =seqlen + 2
__lowercase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase =[
(torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(self.num_layers)
]
__lowercase =common_inputs['attention_mask']
if self.use_past:
__lowercase =ordered_inputs['attention_mask'].dtype
__lowercase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
return ordered_inputs
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return 1_3
| 48 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableUnCLIPPipeline
_lowercase : int = TEXT_TO_IMAGE_PARAMS
_lowercase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowercase : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = 32
SCREAMING_SNAKE_CASE = embedder_hidden_size
# prior components
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=a , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ))
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a , num_layers=1 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=a , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=a)
SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ))
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a , layers_per_block=1 , upcast_attention=a , use_linear_projection=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=a , steps_offset=1 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL()
SCREAMING_SNAKE_CASE = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Tuple:
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=a)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')
SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = pipe('anime turle' , generator=a , output_type='np')
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = pipe.to(a)
pipe.set_progress_bar_config(disable=a)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 137 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a_ : Tuple = logging.getLogger()
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('-f')
SCREAMING_SNAKE_CASE = parser.parse_args()
return args.f
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> None:
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
SCREAMING_SNAKE_CASE = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , 'run_glue_deebert.py')
with patch.object(a , 'argv' , a):
SCREAMING_SNAKE_CASE = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(a , 0.6_66)
@slow
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(a)
SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(a)
SCREAMING_SNAKE_CASE = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(a)
| 137 | 1 |
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> str:
'''simple docstring'''
if number > 0:
raise ValueError('input must be a negative integer' )
_snake_case = len(bin(UpperCamelCase__ )[3:] )
_snake_case = bin(abs(UpperCamelCase__ ) - (1 << binary_number_length) )[3:]
_snake_case = (
(
'1'
+ '0' * (binary_number_length - len(UpperCamelCase__ ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
| 295 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = '''yolos'''
def __init__( self , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=[5_1_2, 8_6_4] , _UpperCamelCase=1_6 , _UpperCamelCase=3 , _UpperCamelCase=True , _UpperCamelCase=1_0_0 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=1 , _UpperCamelCase=5 , _UpperCamelCase=2 , _UpperCamelCase=5 , _UpperCamelCase=2 , _UpperCamelCase=0.1 , **_UpperCamelCase , ) -> Optional[Any]:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : Union[str, Any] = qkv_bias
UpperCAmelCase_ : Optional[int] = num_detection_tokens
UpperCAmelCase_ : Dict = use_mid_position_embeddings
UpperCAmelCase_ : Union[str, Any] = auxiliary_loss
# Hungarian matcher
UpperCAmelCase_ : List[str] = class_cost
UpperCAmelCase_ : Dict = bbox_cost
UpperCAmelCase_ : str = giou_cost
# Loss coefficients
UpperCAmelCase_ : List[str] = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : int = eos_coefficient
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1E-4
@property
def __UpperCAmelCase ( self ) -> int:
return 1_2
| 29 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def __UpperCAmelCase ( a_ , a_=False):
snake_case_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head'):
snake_case_ = 'segformer.encoder.' + key
if key.startswith('backbone'):
snake_case_ = key.replace('backbone' , 'segformer.encoder')
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
snake_case_ = key[key.find('patch_embed') + len('patch_embed')]
snake_case_ = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(a_)-1}''')
if "norm" in key:
snake_case_ = key.replace('norm' , 'layer_norm')
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
snake_case_ = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
snake_case_ = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(a_)-1}''')
if "layer_norm1" in key:
snake_case_ = key.replace('layer_norm1' , 'layer_norm_1')
if "layer_norm2" in key:
snake_case_ = key.replace('layer_norm2' , 'layer_norm_2')
if "block" in key:
# replace for example block1 by block.0
snake_case_ = key[key.find('block') + len('block')]
snake_case_ = key.replace(f'''block{idx}''' , f'''block.{int(a_)-1}''')
if "attn.q" in key:
snake_case_ = key.replace('attn.q' , 'attention.self.query')
if "attn.proj" in key:
snake_case_ = key.replace('attn.proj' , 'attention.output.dense')
if "attn" in key:
snake_case_ = key.replace('attn' , 'attention.self')
if "fc1" in key:
snake_case_ = key.replace('fc1' , 'dense1')
if "fc2" in key:
snake_case_ = key.replace('fc2' , 'dense2')
if "linear_pred" in key:
snake_case_ = key.replace('linear_pred' , 'classifier')
if "linear_fuse" in key:
snake_case_ = key.replace('linear_fuse.conv' , 'linear_fuse')
snake_case_ = key.replace('linear_fuse.bn' , 'batch_norm')
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
snake_case_ = key[key.find('linear_c') + len('linear_c')]
snake_case_ = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(a_)-1}''')
if key.startswith('head'):
snake_case_ = key.replace('head' , 'classifier')
snake_case_ = value
return new_state_dict
def __UpperCAmelCase ( a_ , a_):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks):
for j in range(config.depths[i]):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
snake_case_ = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''')
snake_case_ = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''')
# next, add keys and values (in that order) to the state dict
snake_case_ = kv_weight[
: config.hidden_sizes[i], :
]
snake_case_ = kv_bias[: config.hidden_sizes[i]]
snake_case_ = kv_weight[
config.hidden_sizes[i] :, :
]
snake_case_ = kv_bias[
config.hidden_sizes[i] :
]
def __UpperCAmelCase ( ):
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(a_ , stream=a_).raw)
return image
@torch.no_grad()
def __UpperCAmelCase ( a_ , a_ , a_):
snake_case_ = SegformerConfig()
snake_case_ = False
# set attributes based on model_name
snake_case_ = 'huggingface/label-files'
if "segformer" in model_name:
snake_case_ = model_name[len('segformer.') : len('segformer.') + 2]
if "ade" in model_name:
snake_case_ = 1_50
snake_case_ = 'ade20k-id2label.json'
snake_case_ = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
snake_case_ = 19
snake_case_ = 'cityscapes-id2label.json'
snake_case_ = (1, 19, 1_28, 1_28)
else:
raise ValueError(f'''Model {model_name} not supported''')
elif "mit" in model_name:
snake_case_ = True
snake_case_ = model_name[4:6]
snake_case_ = 10_00
snake_case_ = 'imagenet-1k-id2label.json'
snake_case_ = (1, 10_00)
else:
raise ValueError(f'''Model {model_name} not supported''')
# set config attributes
snake_case_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset') , 'r'))
snake_case_ = {int(a_): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
snake_case_ = [64, 1_28, 3_20, 5_12]
snake_case_ = 2_56
elif size == "b2":
snake_case_ = [64, 1_28, 3_20, 5_12]
snake_case_ = 7_68
snake_case_ = [3, 4, 6, 3]
elif size == "b3":
snake_case_ = [64, 1_28, 3_20, 5_12]
snake_case_ = 7_68
snake_case_ = [3, 4, 18, 3]
elif size == "b4":
snake_case_ = [64, 1_28, 3_20, 5_12]
snake_case_ = 7_68
snake_case_ = [3, 8, 27, 3]
elif size == "b5":
snake_case_ = [64, 1_28, 3_20, 5_12]
snake_case_ = 7_68
snake_case_ = [3, 6, 40, 3]
else:
raise ValueError(f'''Size {size} not supported''')
# load image processor (only resize + normalize)
snake_case_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=a_ , align=a_ , do_random_crop=a_)
# prepare image
snake_case_ = prepare_img()
snake_case_ = image_processor(images=a_ , return_tensors='pt').pixel_values
logger.info(f'''Converting model {model_name}...''')
# load original state dict
if encoder_only:
snake_case_ = torch.load(a_ , map_location=torch.device('cpu'))
else:
snake_case_ = torch.load(a_ , map_location=torch.device('cpu'))['state_dict']
# rename keys
snake_case_ = rename_keys(a_ , encoder_only=a_)
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_)
# create HuggingFace model and load state dict
if encoder_only:
snake_case_ = False
snake_case_ = SegformerForImageClassification(a_)
else:
snake_case_ = SegformerForSemanticSegmentation(a_)
model.load_state_dict(a_)
model.eval()
# forward pass
snake_case_ = model(a_)
snake_case_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
snake_case_ = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
])
elif model_name == "segformer.b1.512x512.ade.160k":
snake_case_ = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
])
elif model_name == "segformer.b2.512x512.ade.160k":
snake_case_ = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
])
elif model_name == "segformer.b3.512x512.ade.160k":
snake_case_ = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
])
elif model_name == "segformer.b4.512x512.ade.160k":
snake_case_ = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
])
elif model_name == "segformer.b5.640x640.ade.160k":
snake_case_ = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
])
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
])
elif model_name == "segformer.b0.512x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
])
elif model_name == "segformer.b0.640x1280.city.160k":
snake_case_ = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
])
elif model_name == "segformer.b0.768x768.city.160k":
snake_case_ = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
])
elif model_name == "segformer.b1.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
])
elif model_name == "segformer.b2.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
])
elif model_name == "segformer.b3.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
])
elif model_name == "segformer.b4.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
])
elif model_name == "segformer.b5.1024x1024.city.160k":
snake_case_ = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
])
else:
snake_case_ = logits.argmax(-1).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1E-2)
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
Path(a_).mkdir(exist_ok=a_)
model.save_pretrained(a_)
image_processor.save_pretrained(a_)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
lowercase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 178 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[int] , lowerCAmelCase__ :str ) -> list[int]:
'''simple docstring'''
lowercase = int(lowerCAmelCase__ )
# Initialize Result
lowercase = []
# Traverse through all denomination
for denomination in reversed(lowerCAmelCase__ ):
# Find denominations
while int(lowerCAmelCase__ ) >= int(lowerCAmelCase__ ):
total_value -= int(lowerCAmelCase__ )
answer.append(lowerCAmelCase__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__lowerCAmelCase : str =[]
__lowerCAmelCase : Union[str, Any] ="""0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__lowerCAmelCase : Optional[Any] =int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
__lowerCAmelCase : Optional[int] =input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__lowerCAmelCase : Tuple =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__lowerCAmelCase : int =input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
__lowerCAmelCase : List[Any] =find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 32 | """simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 32 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, ) -> List[str]:
"""simple docstring"""
_lowercase : str = size if size is not None else {'height': 18, 'width': 18}
_lowercase : Optional[int] = parent
_lowercase : Tuple = batch_size
_lowercase : int = num_channels
_lowercase : Dict = image_size
_lowercase : List[str] = min_resolution
_lowercase : List[str] = max_resolution
_lowercase : Optional[int] = do_resize
_lowercase : Any = size
_lowercase : Tuple = do_normalize
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[Any] = ImageGPTImageProcessor if is_vision_available() else None
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = ImageGPTImageProcessingTester(self)
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase, 'clusters'))
self.assertTrue(hasattr(lowerCamelCase, 'do_resize'))
self.assertTrue(hasattr(lowerCamelCase, 'size'))
self.assertTrue(hasattr(lowerCamelCase, 'do_normalize'))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
_lowercase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : int = self.image_processing_class(**self.image_processor_dict)
_lowercase : Union[str, Any] = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase, obj[key]))
else:
self.assertEqual(obj[key], lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Union[str, Any] = os.path.join(lowerCamelCase, 'image_processor.json')
image_processor_first.to_json_file(lowerCamelCase)
_lowercase : Union[str, Any] = self.image_processing_class.from_json_file(lowerCamelCase).to_dict()
_lowercase : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase, image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key], lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = self.image_processing_class.from_pretrained(lowerCamelCase).to_dict()
_lowercase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase, image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key], lowerCamelCase)
@unittest.skip('ImageGPT requires clusters at initialization')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_( ) -> Tuple:
_lowercase : Tuple = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
_lowercase : List[str] = Image.open(dataset[4]['file'] )
_lowercase : Tuple = Image.open(dataset[5]['file'] )
_lowercase : Dict = [imagea, imagea]
return images
@require_vision
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
_lowercase : Dict = prepare_images()
# test non-batched
_lowercase : int = image_processing(images[0], return_tensors='pt')
self.assertIsInstance(encoding.input_ids, torch.LongTensor)
self.assertEqual(encoding.input_ids.shape, (1, 10_24))
_lowercase : Dict = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist(), lowerCamelCase)
# test batched
_lowercase : List[str] = image_processing(lowerCamelCase, return_tensors='pt')
self.assertIsInstance(encoding.input_ids, torch.LongTensor)
self.assertEqual(encoding.input_ids.shape, (2, 10_24))
_lowercase : List[Any] = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist(), lowerCamelCase)
| 21 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class _lowerCamelCase:
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
_lowercase : Dict = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
_lowercase : Union[str, Any] = titles if not isinstance(lowerCamelCase, lowerCamelCase) else [titles]
_lowercase : Tuple = texts if not isinstance(lowerCamelCase, lowerCamelCase) else [texts]
_lowercase : Optional[Any] = len(lowerCamelCase)
_lowercase : Any = questions if not isinstance(lowerCamelCase, lowerCamelCase) else [questions] * n_passages
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCamelCase)} titles and {len(lowerCamelCase)} texts.''')
_lowercase : Any = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : Tuple = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : int = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase)
]
}
if return_attention_mask is not False:
_lowercase : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase : Union[str, Any] = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = 64, lowerCamelCase = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : Union[str, Any] = reader_input['input_ids']
_lowercase , _lowercase , _lowercase : Tuple = reader_output[:3]
_lowercase : Tuple = len(lowerCamelCase)
_lowercase : str = sorted(range(lowerCamelCase), reverse=lowerCamelCase, key=relevance_logits.__getitem__)
_lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowercase : str = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase : Any = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase : List[Any] = sequence_ids.index(self.pad_token_id)
else:
_lowercase : List[str] = len(lowerCamelCase)
_lowercase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(lowerCamelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_lowercase : Dict = sorted(lowerCamelCase, key=lambda lowerCamelCase: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
__UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : np.ndarray
__UpperCAmelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 144 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_a = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_a , _a = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_a = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_a = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_a = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 144 | 1 |
SCREAMING_SNAKE_CASE__ : Dict = {str(digit): digit**5 for digit in range(10)}
def A ( _SCREAMING_SNAKE_CASE ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_SCREAMING_SNAKE_CASE ) )
def A ( ) -> int:
return sum(
number
for number in range(1000 ,100_0000 )
if number == digits_fifth_powers_sum(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
print(solution())
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 188 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A = '''src/transformers'''
A = '''docs/source/en/tasks'''
def __A ( a_ :List[Any] , a_ :List[Any] , a_ :List[str]) -> List[str]:
with open(a_ , '''r''' , encoding='''utf-8''' , newline='''\n''') as f:
__a : List[str] = f.readlines()
# Find the start prompt.
__a : Optional[Any] = 0
while not lines[start_index].startswith(a_):
start_index += 1
start_index += 1
__a : int = start_index
while not lines[end_index].startswith(a_):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
A = direct_transformers_import(TRANSFORMERS_PATH)
A = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __A ( a_ :Optional[Any]) -> Any:
__a : List[Any] = TASK_GUIDE_TO_MODELS[task_guide]
__a : List[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(a_ , set())
__a : Any = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"
def __A ( a_ :Optional[int] , a_ :Dict=False) -> Any:
__a , __a , __a , __a : Any = _find_text_in_file(
filename=os.path.join(a_ , a_) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__a : Optional[Any] = get_model_list_for_task(a_)
if current_list != new_list:
if overwrite:
with open(os.path.join(a_ , a_) , '''w''' , encoding='''utf-8''' , newline='''\n''') as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''')
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 188 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
__lowercase= XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase__ )
__lowercase, __lowercase= XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase__ , output_loading_info=lowercase__ )
else:
__lowercase= ProphetNetForConditionalGenerationOld.from_pretrained(lowercase__ )
__lowercase, __lowercase= ProphetNetForConditionalGeneration.from_pretrained(
lowercase__ , output_loading_info=lowercase__ )
__lowercase= ['key_proj', 'value_proj', 'query_proj']
__lowercase= {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
__lowercase= key.split('.' )
if attributes[0] == "lm_head":
__lowercase= prophet
__lowercase= prophet_old
else:
__lowercase= prophet.prophetnet
__lowercase= prophet_old.model
__lowercase= False
for attribute in attributes:
if attribute in mapping:
__lowercase= mapping[attribute]
if not hasattr(lowercase__ , lowercase__ ) and len(lowercase__ ) > 0:
__lowercase= attribute
elif hasattr(lowercase__ , lowercase__ ):
__lowercase= attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowercase= old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowercase= True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowercase= old_model.bias
logger.info(F'{attribute} is initialized' )
__lowercase= True
break
elif attribute in special_keys and hasattr(lowercase__ , 'in_proj_weight' ):
__lowercase= old_model.in_proj_weight.shape[0] // 3
__lowercase= getattr(lowercase__ , lowercase__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowercase= nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowercase= nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowercase= nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowercase= nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowercase= nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowercase= nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowercase= True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
__lowercase= nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
__lowercase= True
break
if attribute.isdigit():
__lowercase= model[int(lowercase__ )]
__lowercase= old_model[int(lowercase__ )]
else:
__lowercase= getattr(lowercase__ , lowercase__ )
if old_attribute == "":
__lowercase= old_model
else:
if not hasattr(lowercase__ , lowercase__ ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__lowercase= getattr(lowercase__ , lowercase__ )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 295 |
from math import isqrt
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase__ ) + 1 ) )
def _lowerCamelCase( lowercase__ = 1_0**6 ) -> int:
'''simple docstring'''
__lowercase= 0
__lowercase= 1
__lowercase= 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
| 295 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase : List[str] ='''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[str] =direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase : Union[str, Any] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase : Optional[int] =re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
lowerCamelCase : List[Any] ={
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Tuple:
UpperCamelCase__ : Tuple = None
# source code of `config_class`
UpperCamelCase__ : int = inspect.getsource(A_ )
UpperCamelCase__ : Dict = _re_checkpoint.findall(A_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
UpperCamelCase__ : Dict = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase__ : Optional[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
UpperCamelCase__ : Any = ckpt_name
break
return checkpoint
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase__ : str = get_checkpoint_from_config_class(A_ )
UpperCamelCase__ : List[Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(A_ )
if len(A_ ) > 0:
UpperCamelCase__ : Tuple = '''\n'''.join(sorted(A_ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 358 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __a :
_lowerCAmelCase : CommonSchedulerState
# setable values
_lowerCAmelCase : jnp.ndarray
_lowerCAmelCase : jnp.ndarray
_lowerCAmelCase : Optional[int] = None
@classmethod
def __lowercase ( cls : Optional[Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
'''simple docstring'''
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class __a ( A__ ):
_lowerCAmelCase : DDPMSchedulerState
class __a ( A__ , A__ ):
_lowerCAmelCase : Tuple = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCAmelCase : jnp.dtype
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 10_00 , SCREAMING_SNAKE_CASE : float = 0.0_0_0_1 , SCREAMING_SNAKE_CASE : float = 0.0_2 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCamelCase__ : int = dtype
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCamelCase__ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCamelCase__ : str = jnp.array(1.0 , dtype=self.dtype )
UpperCamelCase__ : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
'''simple docstring'''
return sample
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCamelCase__ : Optional[int] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def __lowercase ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Optional[Any]=None ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = state.common.alphas_cumprod[t]
UpperCamelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCamelCase__ : List[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCamelCase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCamelCase__ : Optional[Any] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
UpperCamelCase__ : Dict = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCamelCase__ : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCamelCase__ : Tuple = variance
UpperCamelCase__ : int = state.common.betas[t]
UpperCamelCase__ : Union[str, Any] = (predicted_variance + 1) / 2
UpperCamelCase__ : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
UpperCamelCase__ : str = timestep
if key is None:
UpperCamelCase__ : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCamelCase__ , UpperCamelCase__ : Optional[int] = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
UpperCamelCase__ : Optional[int] = None
# 1. compute alphas, betas
UpperCamelCase__ : Optional[int] = state.common.alphas_cumprod[t]
UpperCamelCase__ : Dict = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCamelCase__ : Any = 1 - alpha_prod_t
UpperCamelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase__ : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase__ : Dict = model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase__ : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase__ : List[str] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase__ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCamelCase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase__ : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCamelCase__ : str = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
UpperCamelCase__ : Tuple = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
UpperCamelCase__ : Any = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCamelCase__ : List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps | 196 | 0 |
def SCREAMING_SNAKE_CASE_ ( __A : list[list] ) -> list[list]:
"""simple docstring"""
a_ : List[str] = current_set.copy()
for row_index, row in enumerate(__A ):
a_ : List[str] = row[0]
for column_index, column in enumerate(__A ):
if magnitude == 0:
a_ : Any = column
continue
a_ : List[str] = column / magnitude
# Subtract to cancel term
a_ : Any = current_set[0]
a_ : Optional[int] = [first_row]
a_ : Dict = current_set[1::]
for row in current_set:
a_ : str = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__A )
continue
for column_index in range(len(__A ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__A )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
a_ : Dict = final_set[0]
a_ : Union[str, Any] = []
a_ : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
a_ : List[Any] = simplify(__A )
for i in range(len(__A ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __A )
a_ : Any = resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( __A : list[list] ) -> list:
"""simple docstring"""
if len(__A ) == 0:
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
a_ : List[Any] = len(__A ) + 1
if any(len(__A ) != _length for item in equations ):
raise IndexError('solve_simultaneous() requires n lists of length n+1' )
for row in equations:
if any(not isinstance(__A , (int, float) ) for column in row ):
raise ValueError('solve_simultaneous() requires lists of integers' )
if len(__A ) == 1:
return [equations[0][-1] / equations[0][0]]
a_ : Union[str, Any] = equations.copy()
if any(0 in row for row in data_set ):
a_ : Any = data_set.copy()
a_ : Tuple = []
for row_index, row in enumerate(__A ):
if 0 not in row:
a_ : Any = data_set.pop(__A )
break
if not full_row:
raise ValueError('solve_simultaneous() requires at least 1 full equation' )
data_set.insert(0 , __A )
a_ : List[Any] = data_set.copy()
a_ : Optional[Any] = simplify(__A )
a_ : Union[str, Any] = simplified[::-1]
a_ : list = []
for row in simplified:
a_ : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
a_ : int = row.copy()[: len(__A ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__A ) == 0:
solutions.append(0 )
continue
a_ : List[Any] = temp_row[1::]
a_ : Optional[int] = temp_row[::-1]
for column_index, column in enumerate(__A ):
current_solution -= column * solutions[column_index]
solutions.append(__A )
a_ : Tuple = []
for item in solutions:
final.append(float(round(__A , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Any = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str:
a_ : Optional[Any] = parent
a_ : List[str] = batch_size
a_ : List[str] = seq_length
a_ : str = is_training
a_ : str = use_input_mask
a_ : int = use_token_type_ids
a_ : List[str] = use_labels
a_ : Optional[int] = vocab_size
a_ : Any = hidden_size
a_ : int = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : str = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : int = max_position_embeddings
a_ : Tuple = type_vocab_size
a_ : Optional[Any] = type_sequence_label_size
a_ : Tuple = initializer_range
a_ : Dict = num_labels
a_ : str = scope
a_ : Optional[int] = range_bbox
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ : int = bbox[i, j, 3]
a_ : str = bbox[i, j, 1]
a_ : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ : Tuple = bbox[i, j, 2]
a_ : List[str] = bbox[i, j, 0]
a_ : Union[str, Any] = t
a_ : List[Any] = None
if self.use_input_mask:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : int = None
a_ : Tuple = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : Optional[int] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str:
a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int:
a_ : Any = self.num_labels
a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str:
a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : List[str] = model(
SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
a_ : int = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) : List[Any] = config_and_inputs
a_ : Optional[int] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : str = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int:
return True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ : str = LiltModelTester(self )
a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ : List[str] = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ )
a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = torch.Size([1, 2, 7_6_8] )
a_ : int = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
| 32 | 1 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
SCREAMING_SNAKE_CASE_ : Tuple = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
SCREAMING_SNAKE_CASE_ : Any = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
SCREAMING_SNAKE_CASE_ : Tuple = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
SCREAMING_SNAKE_CASE_ : List[str] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def UpperCamelCase ( self: Dict , UpperCamelCase: List[Any] , UpperCamelCase: str , UpperCamelCase: Tuple=[1, 10, 1_00] , UpperCamelCase: str=4 , UpperCamelCase: Dict=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=UpperCamelCase ) as executor:
A__ = []
A__ = Counter()
A__ = 0
A__ = defaultdict(UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCamelCase , UpperCamelCase ) ):
for candidate in candidates:
A__ = candidate + """\n""" + test_case
A__ = (test_program, timeout, task_id, completion_id[task_id])
A__ = executor.submit(UpperCamelCase , *UpperCamelCase )
futures.append(UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCamelCase ):
A__ = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
A__ , A__ = [], []
for result in results.values():
result.sort()
A__ = [r[1]["""passed"""] for r in result]
total.append(len(UpperCamelCase ) )
correct.append(sum(UpperCamelCase ) )
A__ = np.array(UpperCamelCase )
A__ = np.array(UpperCamelCase )
A__ = k
A__ = {f"""pass@{k}""": estimate_pass_at_k(UpperCamelCase , UpperCamelCase , UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ):
def estimator(UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = itertools.repeat(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
else:
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
A__ = iter(UpperCAmelCase_ )
return np.array([estimator(int(UpperCAmelCase_ ) , int(UpperCAmelCase_ ) , UpperCAmelCase_ ) for n, c in zip(UpperCAmelCase_ , UpperCAmelCase_ )] )
| 365 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: int=37 , UpperCamelCase: str="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: List[str]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: List[str]=False , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]="None" , UpperCamelCase: Optional[int]=3 , UpperCamelCase: List[str]=4 , UpperCamelCase: List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.get_config()
A__ = 3_00
return config
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase ( self: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = DebertaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = DebertaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 69 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
A__ : Dict = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
A__ : Tuple = (((515, 22, 13), 555), ((61, 35, 49), 150))
A__ : Tuple = [2, 4, 1, 5]
A__ : Tuple = len(train_data)
A__ : List[str] = 0.009
def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple="train" ) -> Union[str, Any]:
return calculate_hypothesis_value(lowerCamelCase__ , lowerCamelCase__ ) - output(
lowerCamelCase__ , lowerCamelCase__ )
def _snake_case ( lowerCamelCase__ : Tuple ) -> Union[str, Any]:
lowerCamelCase_ : int =0
for i in range(len(lowerCamelCase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _snake_case ( lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str ) -> Dict:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=m ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] =0
for i in range(lowerCamelCase__ ):
if index == -1:
summation_value += _error(lowerCamelCase__ )
else:
summation_value += _error(lowerCamelCase__ ) * train_data[i][0][index]
return summation_value
def _snake_case ( lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
lowerCamelCase_ : Optional[Any] =summation_of_cost_derivative(lowerCamelCase__ , lowerCamelCase__ ) / m
return cost_derivative_value
def _snake_case ( ) -> Dict:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase_ : List[str] =0.00_0002
lowerCamelCase_ : Tuple =0
lowerCamelCase_ : Tuple =0
while True:
j += 1
lowerCamelCase_ : Optional[int] =[0, 0, 0, 0]
for i in range(0 , len(lowerCamelCase__ ) ):
lowerCamelCase_ : str =get_cost_derivative(i - 1 )
lowerCamelCase_ : List[str] =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCamelCase__ , lowerCamelCase__ , atol=lowerCamelCase__ , rtol=lowerCamelCase__ , ):
break
lowerCamelCase_ : Dict =temp_parameter_vector
print(("Number of iterations:", j) )
def _snake_case ( ) -> Optional[int]:
for i in range(len(lowerCamelCase__ ) ):
print(("Actual output value:", output(lowerCamelCase__ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(lowerCamelCase__ , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 144 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( snake_case__, unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
_UpperCAmelCase :List[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :List[str] = ["image_embeds", "negative_image_embeds", "image", "hint"]
_UpperCAmelCase :Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase :str = False
@property
def UpperCAmelCase__ ( self : Tuple ):
return 32
@property
def UpperCAmelCase__ ( self : List[Any] ):
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : int ):
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return 100
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] ={
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase_ : Union[str, Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self : Any ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ : int =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[int] =self.dummy_unet
lowerCamelCase_ : Optional[Any] =self.dummy_movq
lowerCamelCase_ : Optional[Any] ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase_ : Optional[Any] =DDIMScheduler(**snake_case__ )
lowerCamelCase_ : Optional[Any] ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : str , snake_case__ : str=0 ):
lowerCamelCase_ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCamelCase_ : Optional[Any] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
lowerCamelCase_ : List[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCamelCase_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Tuple =Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((256, 256) )
# create hint
lowerCamelCase_ : Dict =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("mps" ):
lowerCamelCase_ : List[Any] =torch.manual_seed(snake_case__ )
else:
lowerCamelCase_ : List[str] =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCamelCase_ : Dict ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Any ="cpu"
lowerCamelCase_ : Dict =self.get_dummy_components()
lowerCamelCase_ : Dict =self.pipeline_class(**snake_case__ )
lowerCamelCase_ : str =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[Any] =pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCamelCase_ : Dict =output.images
lowerCamelCase_ : Dict =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCamelCase_ : List[str] =image[0, -3:, -3:, -1]
lowerCamelCase_ : Optional[int] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ : Union[str, Any] =np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : List[Any] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
lowerCamelCase_ : Optional[int] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase_ : Optional[int] =init_image.resize((512, 512) )
lowerCamelCase_ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
lowerCamelCase_ : Any =torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
lowerCamelCase_ : Union[str, Any] =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCamelCase_ : str ="A robot, 4k photo"
lowerCamelCase_ : List[Any] =KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCamelCase_ : Any =KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
lowerCamelCase_ : List[str] =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Tuple =torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ , lowerCamelCase_ : Tuple =pipe_prior(
snake_case__ , image=snake_case__ , strength=0.85 , generator=snake_case__ , negative_prompt="" , ).to_tuple()
lowerCamelCase_ : str =pipeline(
image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
lowerCamelCase_ : Optional[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 144 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : List[str] = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE_: str = [1_44, 1_92, 2_40]
SCREAMING_SNAKE_CASE_: Any = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE_: Tuple = [96, 1_20, 1_44]
SCREAMING_SNAKE_CASE_: Any = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE_: Optional[int] = [64, 80, 96]
SCREAMING_SNAKE_CASE_: Optional[Any] = [16, 16, 24, 48, 64, 80, 3_20]
SCREAMING_SNAKE_CASE_: Any = 0.0_5
SCREAMING_SNAKE_CASE_: Tuple = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE_: List[str] = 5_12
SCREAMING_SNAKE_CASE_: Tuple = 16
SCREAMING_SNAKE_CASE_: int = 21
SCREAMING_SNAKE_CASE_: Union[str, Any] = "pascal-voc-id2label.json"
else:
SCREAMING_SNAKE_CASE_: int = 10_00
SCREAMING_SNAKE_CASE_: int = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_: List[str] = "huggingface/label-files"
SCREAMING_SNAKE_CASE_: Dict = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_: List[str] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Tuple = idalabel
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
for i in range(1 , 6 ):
if f"layer_{i}." in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )
if "conv_1." in name:
SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
SCREAMING_SNAKE_CASE_: Optional[int] = name.replace(".block." , "." )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE_: List[Any] = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE_: List[Any] = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE_: Dict = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
SCREAMING_SNAKE_CASE_: Any = name.replace(".norm." , ".normalization." )
if ".conv." in name:
SCREAMING_SNAKE_CASE_: List[Any] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE_: Any = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
SCREAMING_SNAKE_CASE_: Any = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
SCREAMING_SNAKE_CASE_: str = name.replace(f".{i}.{j}." , f".{i}." )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE_: str = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if f".global_rep.{i}.weight" in name:
SCREAMING_SNAKE_CASE_: Any = name.replace(f".global_rep.{i}.weight" , ".layernorm.weight" )
if f".global_rep.{i}.bias" in name:
SCREAMING_SNAKE_CASE_: str = name.replace(f".global_rep.{i}.bias" , ".layernorm.bias" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE_: Tuple = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE_: List[Any] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE_: Optional[int] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE_: str = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE_: Tuple = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE_: int = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE_: Tuple = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
SCREAMING_SNAKE_CASE_: str = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE_: List[str] = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE_: Tuple = "mobilevit." + name
return name
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
if base_model:
SCREAMING_SNAKE_CASE_: Tuple = ""
else:
SCREAMING_SNAKE_CASE_: Optional[Any] = "mobilevit."
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: int = orig_state_dict.pop(_UpperCAmelCase )
if key[:8] == "encoder.":
SCREAMING_SNAKE_CASE_: Tuple = key[8:]
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict = key.split("." )
SCREAMING_SNAKE_CASE_: int = int(key_split[0][6:] ) - 1
SCREAMING_SNAKE_CASE_: int = int(key_split[3] )
SCREAMING_SNAKE_CASE_: List[Any] = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
SCREAMING_SNAKE_CASE_: List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
SCREAMING_SNAKE_CASE_: int = (
f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
SCREAMING_SNAKE_CASE_: Tuple = val[:dim, :]
SCREAMING_SNAKE_CASE_: Dict = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_: Any = val[:dim]
SCREAMING_SNAKE_CASE_: List[Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Any = val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Dict = val
return orig_state_dict
def A_ ( ):
SCREAMING_SNAKE_CASE_: Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_: Dict = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_: Tuple = get_mobilevit_config(_UpperCAmelCase )
# load original state_dict
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.load(_UpperCAmelCase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE_: Optional[Any] = MobileViTForSemanticSegmentation(_UpperCAmelCase ).eval()
else:
SCREAMING_SNAKE_CASE_: Optional[Any] = MobileViTForImageClassification(_UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE_: str = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE_: Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE_: List[str] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE_: int = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE_: str = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE_: str = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
SCREAMING_SNAKE_CASE_: List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
SCREAMING_SNAKE_CASE_: Optional[int] = model_mapping[mobilevit_name]
image_processor.push_to_hub(_UpperCAmelCase , organization="apple" )
model.push_to_hub(_UpperCAmelCase , organization="apple" )
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : Tuple = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 127 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 127 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 188 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag that marks arrow keys inside the KEYMAP
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 188 | 1 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuilds the class through the KeyHandler metaclass."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
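# Usage sketch (hypothetical menu class, not part of the original module):
#
#     @register
#     class Menu:
#         @mark(KEYMAP["up"])
#         def move_up(cls):
#             ...
#
# After `register` rebuilds the class through the KeyHandler metaclass, calling
# `handle_input` on an instance reads one key and dispatches to the marked method.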
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
| 337 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None):
    """
    Fetch posts from a subreddit, optionally restricted to the fields in wanted_data.
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 70 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 196 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 354 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Returns the maximum product obtainable from a contiguous subarray."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
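# Quick sanity checks for the function above (a sketch, not in the original):
# tracking both the running max and min lets sign flips from negatives be handled.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0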
| 27 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
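# Example (a sketch, not in the original): size-3 character n-grams.
assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]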
| 333 | """simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
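# Quick check (a sketch, not in the original): 7 * 8 = 56 ≡ 1 (mod 11),
# so the modular inverse of 7 modulo 11 is 8.
assert find_mod_inverse(7, 11) == 8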
| 69 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 144 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 144 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 127 |
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    """
    Builds the state-space tree: place a queen in every safe column of the
    current row, recurse into the next row, then backtrack.
    """
    if row >= len(board):
        # A full placement was found: record and print it, then backtrack
        # to enumerate the remaining solutions.
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    """
    Prints a board that holds a successful queen placement.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 358 |
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
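def _demo() -> None:
    # A small usage sketch (not in the original file): build, iterate, delete.
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == "1 2 3"
    linked_list.delete_value(2)
    assert list(linked_list) == [1, 3]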
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 | 0 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    Monte Carlo estimate of pi: draw points uniformly in the square [-1, 1]^2
    and count the fraction that lands inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """
    Monte Carlo estimate of the integral of function_to_integrate over
    [min_value, max_value].
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
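# Example invocations (a sketch; estimates improve with more iterations):
#     pi_estimator(100_000)                         # prints an estimate near 3.14
#     area_under_line_estimator_check(100_000)      # integral of y=x over [0, 1]
#     pi_estimator_using_area_under_curve(100_000)  # quarter circle of radius 2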
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
def solution() -> str:
    """Returns the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
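# Known result for Project Euler problem 48: solution() == "9110846700".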
if __name__ == "__main__":
print(solution())
| 337 | 0 |
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """
    A pseudorandom number generator.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """
        These parameters are saved and used when next_number() is called.
        modulo is the largest number that can be generated (exclusive); a
        common choice is 2**32.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """
        The smallest number that can be generated is zero.
        The largest is modulo - 1.
        """
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
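# The demo below uses the classic "Numerical Recipes" LCG parameters:
# a = 1664525, c = 1013904223, m = 2**32 (written here as 2 << 31).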
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 355 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse unknown arguments as --key value pairs."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
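# Example (hypothetical invocation): `datasets-cli env` dispatches to the
# EnvironmentCommand registered above and prints environment information.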
if __name__ == "__main__":
main()
| 174 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 239 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
def solution():
    """
    Returns the product a * b * c of the Pythagorean triplet with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
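# Project Euler 9: the unique triplet is (a, b, c) = (200, 375, 425),
# so solution() == 31875000.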
if __name__ == "__main__":
print(F'''{solution() = }''')
| 351 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , ) -> List[str]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowerCAmelCase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to next input_ids and the attention mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase : Dict = (FalconForCausalLM,) if is_torch_available() else ()
lowerCamelCase : int = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : int = False
lowerCamelCase : Union[str, Any] = False
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
lowerCAmelCase = FalconModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Tuple:
lowerCAmelCase , *lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
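        # The legacy "RW" cache layout is rank-3 (batch and head dims presumably fused), while the
        # standard layout is rank-4; the assertions below check both ranks and that the round-trip
        # through the two formats preserves every value.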
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , 'use_cache' ):
return
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase = True
lowerCAmelCase = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase = (
getattr(UpperCAmelCase__ , 'decoder_layers' , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , 'num_decoder_layers' , UpperCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase = getattr(UpperCAmelCase__ , 'num_kv_heads' , config.num_attention_heads )
lowerCAmelCase = getattr(UpperCAmelCase__ , 'd_model' , config.hidden_size )
lowerCAmelCase = embed_dim // num_attention_heads
lowerCAmelCase = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = inputs['input_ids'].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
lowerCAmelCase = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 55 | 0 |
"""simple docstring"""
A__ : Optional[int] = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 144 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left , right ):
if array[i] == target:
return i
return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array )
while left <= right:
if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
else:
return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
if left < right:
if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
else:
return -1
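# Both searches need O(log3(n)) comparisons on a sorted list, falling back to the linear
# scan above once the remaining range is narrower than `precision`.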
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_rec != -1:
        print(F"Iterative search: {target} found at positions: {result_ite}")
        print(F"Recursive search: {target} found at positions: {result_rec}")
else:
print('Not found')
| 177 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
for row in grid:
for cell in row:
            print(cell , end=''' ''' )
print()
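# The solver backtracks over at most 9 candidate digits per empty cell; `sudoku` mutates the
# grid in place and returns it on success, or ``None`` when no assignment works.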
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 177 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class a__ ( BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
pass
| 67 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
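# NOTE: `get_duration` comes from the local `utils` module imported above; it presumably wraps
# each benchmark function and returns its wall-clock running time, which is what ends up in the
# JSON results file written at the end of `benchmark_iterating`.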
@get_duration
def read( dataset , length ):
    '''simple docstring'''
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch( dataset , length , batch_size ):
    '''simple docstring'''
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted( dataset , length , type ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch( dataset , length , batch_size , type ):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating( ):
    '''simple docstring'''
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
print('first set of iterations' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            # record the wall-clock duration returned by the `get_duration` wrapper
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
        for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs ) )
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 285 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_UpperCAmelCase : Dict = """scheduler_config.json"""
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = 5
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase :
UpperCAmelCase__ = SCHEDULER_CONFIG_NAME
UpperCAmelCase__ = ["""dtype"""]
UpperCAmelCase__ = []
UpperCAmelCase__ = True
@classmethod
def A_ ( cls : Tuple , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Any=False , **UpperCAmelCase : List[str] , ) -> Any:
lowerCamelCase__ , lowerCamelCase__ : int = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
lowerCamelCase__ , lowerCamelCase__ : str = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
lowerCamelCase__ : Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def A_ ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Dict ) -> Union[str, Any]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def A_ ( self : int ) -> Optional[int]:
return self._get_compatibles()
@classmethod
def A_ ( cls : int ) -> Union[str, Any]:
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('.' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
return compatible_classes
def broadcast_to_shape_from_left( x , shape ) -> jnp.ndarray:
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ) -> jnp.ndarray:
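    # Cosine ("squaredcos_cap_v2" / GLIDE) schedule: with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
    # each beta is min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta), so the cumulative product
    # of (1 - beta) tracks the cosine curve.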
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
@classmethod
def A_ ( cls : List[str] , UpperCAmelCase : Tuple ) -> Any:
        config = scheduler.config
if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
            betas = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state , original_samples , noise , timesteps ):
    # Forward-diffusion step: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common( state , sample , noise , timesteps ):
    # "v-prediction" target (Salimans & Ho, 2022): v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
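# NOTE: the module-level helpers above mirror the per-timestep math shared by the Flax
# schedulers; `broadcast_to_shape_from_left` pads trailing singleton dims so the flattened
# alpha terms can multiply sample tensors of any rank.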
| 45 |
def is_arithmetic_series( series ) -> bool:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean( series ) -> float:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
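# Worked example: for [2, 4, 6] the common difference is 2, so the series check returns True,
# and the mean is (2 + 4 + 6) / 3 == 4.0.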
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def _snake_case ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _snake_case ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            # `str.maketrans("", "", chars)` builds a deletion table, so the translate calls
            # strip every punctuation (and, below, every digit) character before comparison.
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 46 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : int = IFInpaintingSuperResolutionPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__UpperCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def _snake_case (self ):
return self._get_superresolution_dummy_components()
def _snake_case (self , __lowercase , __lowercase=0 ):
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case (self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case (self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _snake_case (self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case (self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case (self ):
self._test_save_load_local()
def _snake_case (self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 174 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = """http://www.mocksite.com/file1.txt"""
CONTENT = """\"text\": [\"foo\", \"foo\"]"""
HASH = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 2_00
    headers = {"""Content-Length""": """100"""}
    cookies = {}
    def iter_content( self, **kwargs ):
        '''simple docstring'''
        return [bytes(CONTENT, """utf-8""" )]
def mock_request( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> List[str]:
import requests
    monkeypatch.setattr(requests , """request""" , mock_request )
__lowerCAmelCase = URL
if issubclass(lowercase , lowercase ):
__lowerCAmelCase = url
elif issubclass(lowercase , lowercase ):
__lowerCAmelCase = [url]
elif issubclass(lowercase , lowercase ):
__lowerCAmelCase = {"""train""": url}
__lowerCAmelCase = """dummy"""
__lowerCAmelCase = """downloads"""
__lowerCAmelCase = tmp_path
__lowerCAmelCase = DownloadConfig(
cache_dir=os.path.join(lowercase , lowercase ) , use_etag=lowercase , )
__lowerCAmelCase = DownloadManager(dataset_name=lowercase , download_config=lowercase )
__lowerCAmelCase = dl_manager.download(lowercase )
__lowerCAmelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase , lowercase ):
__lowerCAmelCase = [downloaded_paths]
__lowerCAmelCase = [urls]
elif isinstance(lowercase , lowercase ):
assert "train" in downloaded_paths.keys()
__lowerCAmelCase = downloaded_paths.values()
__lowerCAmelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase , lowercase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__lowerCAmelCase = Path(lowercase )
__lowerCAmelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__lowerCAmelCase = downloaded_path.read_text()
assert content == CONTENT
__lowerCAmelCase = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__lowerCAmelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> Union[str, Any]:
__lowerCAmelCase = str(lowercase )
if issubclass(lowercase , lowercase ):
__lowerCAmelCase = filename
elif issubclass(lowercase , lowercase ):
__lowerCAmelCase = [filename]
elif issubclass(lowercase , lowercase ):
__lowerCAmelCase = {"""train""": filename}
__lowerCAmelCase = """dummy"""
__lowerCAmelCase = xz_file.parent
__lowerCAmelCase = """extracted"""
__lowerCAmelCase = DownloadConfig(
cache_dir=lowercase , use_etag=lowercase , )
__lowerCAmelCase = DownloadManager(dataset_name=lowercase , download_config=lowercase )
__lowerCAmelCase = dl_manager.extract(lowercase )
__lowerCAmelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase , lowercase ):
__lowerCAmelCase = [extracted_paths]
__lowerCAmelCase = [paths]
elif isinstance(lowercase , lowercase ):
assert "train" in extracted_paths.keys()
__lowerCAmelCase = extracted_paths.values()
__lowerCAmelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase , lowercase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowerCAmelCase = Path(lowercase )
__lowerCAmelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase , etag=lowercase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowerCAmelCase = extracted_path.read_text()
__lowerCAmelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple:
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(lowercase , start=1 ):
__lowerCAmelCase = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[str]:
__lowerCAmelCase = request.getfixturevalue(lowercase )
__lowerCAmelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _lowerCAmelCase ( lowercase , lowercase ) -> List[Any]:
__lowerCAmelCase = request.getfixturevalue(lowercase )
__lowerCAmelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase ) , start=1 ):
_test_jsonl(lowercase , lowercase )
assert num_tar == 1
assert num_jsonl == 2
def _lowerCAmelCase ( lowercase ) -> List[Any]:
__lowerCAmelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase ) , start=1 ):
assert os.path.basename(lowercase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 364 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    """Resolve a force of given magnitude and direction into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    """A rigid body is in static equilibrium when the net moment about any point is zero."""
    # moment of each force about the origin: the 2D cross product location x force
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 46 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
lowerCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
lowerCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return float((preds == labels).mean() )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="binary" ):
"""simple docstring"""
lowercase__ = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = {}
for id_pred, label in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
lowercase__ = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowercase__ = [(pred, label)]
lowercase__ , lowercase__ = [], []
for question, preds_labels in question_map.items():
lowercase__ , lowercase__ = zip(*SCREAMING_SNAKE_CASE )
lowercase__ = fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average='''macro''' )
fas.append(SCREAMING_SNAKE_CASE )
lowercase__ = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE ) )
ems.append(SCREAMING_SNAKE_CASE )
lowercase__ = float(sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) )
lowercase__ = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )
lowercase__ = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
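    # Per the docstring above: `f1_m` averages a per-question macro-F1, `f1_a` is a single F1
    # over all answers pooled together, and `exact_match` is the fraction of questions whose
    # answers were all predicted correctly.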
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def lowerCamelCase_ ( self: str , UpperCamelCase_: str , UpperCamelCase_: List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ , fa_avg='''macro''' )
elif self.config_name == "record":
lowercase__ = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
lowercase__ = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(UpperCamelCase_ , UpperCamelCase_ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 110 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase = 13 , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 128 , UpperCamelCase=[16, 32, 64, 128] , UpperCamelCase = 7 , UpperCamelCase = 4 , UpperCamelCase = 37 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 10 , UpperCamelCase = 0.02 , UpperCamelCase = 2 , UpperCamelCase = 1 , UpperCamelCase = 128 , UpperCamelCase = [2, 2, 2, 2] , UpperCamelCase = 2 , UpperCamelCase = 2 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = num_attention_outputs
lowerCamelCase_ = embed_dim
lowerCamelCase_ = embed_dim + 1
lowerCamelCase_ = resolution
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = dim
lowerCamelCase_ = mlp_expansion_ratio
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModel(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModelTester(self )
lowerCamelCase_ = ConfigTester(
self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCamelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCamelCase_ = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase_ = outputs.decoder_hidden_states
self.asseretIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEfficientFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "chunk_length" , UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCamelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def snake_case ( self ):
"""simple docstring"""
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase_ = model_class(UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase_ = model(UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def __snake_case ( ):
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" )
# forward pass
lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="tf" )
# forward pass
lowerCamelCase_ = model(**UpperCamelCase , training=UpperCamelCase )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
| 55 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_lowerCAmelCase = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def UpperCamelCase ( a , a , a , a=1024 ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ = [], []
__magic_name__ = list(zip(a , a ) )
__magic_name__ , __magic_name__ = sorted_examples[0]
def is_too_big(a ):
return tok(a , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__magic_name__ = new_src + ''' ''' + src
__magic_name__ = new_tgt + ''' ''' + tgt
if is_too_big(a ) or is_too_big(a ): # cant fit, finalize example
finished_src.append(a )
finished_tgt.append(a )
__magic_name__ , __magic_name__ = src, tgt
else: # can fit, keep adding
__magic_name__ , __magic_name__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(a )
finished_tgt.append(a )
return finished_src, finished_tgt
def UpperCamelCase ( a , a , a , a ) -> Any:
'''simple docstring'''
__magic_name__ = Path(a )
save_path.mkdir(exist_ok=a )
for split in ["train"]:
__magic_name__ , __magic_name__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
__magic_name__ = [x.rstrip() for x in Path(a ).open().readlines()]
__magic_name__ = [x.rstrip() for x in Path(a ).open().readlines()]
__magic_name__ , __magic_name__ = pack_examples(a , a , a , a )
print(F'''packed {split} split from {len(a )} examples -> {len(a )}.''' )
Path(save_path / F'''{split}.source''' ).open('''w''' ).write('''\n'''.join(a ) )
Path(save_path / F'''{split}.target''' ).open('''w''' ).write('''\n'''.join(a ) )
for split in ["val", "test"]:
__magic_name__ , __magic_name__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(a , save_path / F'''{split}.source''' )
shutil.copyfile(a , save_path / F'''{split}.target''' )
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--tok_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''--max_seq_len''' , type=a , default=128 )
parser.add_argument('''--data_dir''' , type=a )
parser.add_argument('''--save_path''' , type=a )
__magic_name__ = parser.parse_args()
__magic_name__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(a , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
__A = 1.6_0_2_1E-1_9 # units = C
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | """simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
lowercase__: Dict = parent
lowercase__: List[str] = batch_size
lowercase__: Optional[Any] = seq_length
lowercase__: List[Any] = is_training
lowercase__: int = use_attention_mask
lowercase__: Tuple = use_token_type_ids
lowercase__: Union[str, Any] = use_labels
lowercase__: str = vocab_size
lowercase__: str = hidden_size
lowercase__: str = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: List[str] = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: Tuple = hidden_dropout_prob
lowercase__: int = attention_probs_dropout_prob
lowercase__: int = max_position_embeddings
lowercase__: Union[str, Any] = type_vocab_size
lowercase__: List[Any] = type_sequence_label_size
lowercase__: Any = initializer_range
lowercase__: str = num_choices
def _snake_case ( self ):
lowercase__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_attention_mask:
lowercase__: Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self ):
lowercase__: str = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__, lowercase__: Optional[Any] = config_and_inputs
lowercase__: Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = True
_UpperCAmelCase :Union[str, Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self ):
lowercase__: str = FlaxRoFormerModelTester(self )
@slow
def _snake_case ( self ):
for model_class_name in self.all_model_classes:
lowercase__: Dict = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_UpperCAmelCase )
lowercase__: int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Any = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase__: Optional[int] = jnp.array([[0, 1, 2, 3, 4, 5]] )
lowercase__: List[Any] = model(_UpperCAmelCase )[0]
lowercase__: str = 50000
lowercase__: Tuple = (1, 6, vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[Any] = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 177 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_UpperCAmelCase = '\\n\n'
_UpperCAmelCase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_UpperCAmelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int = 16 , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[Any]=None ) -> Optional[int]:
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCamelCase_ = "cuda"
else:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCamelCase_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCamelCase_ = model.config.max_length - 1
else:
UpperCamelCase_ = model.config.max_length
UpperCamelCase_ = tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors="pt" , return_attention_mask=_SCREAMING_SNAKE_CASE , ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = encodings["input_ids"]
UpperCamelCase_ = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCamelCase_ = []
UpperCamelCase_ = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ):
UpperCamelCase_ = min(start_index + batch_size , len(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = encoded_texts[start_index:end_index]
UpperCamelCase_ = attn_masks[start_index:end_index]
if add_start_token:
UpperCamelCase_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCamelCase_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
UpperCamelCase_ = encoded_batch
with torch.no_grad():
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ).logits
UpperCamelCase_ = out_logits[..., :-1, :].contiguous()
UpperCamelCase_ = labels[..., 1:].contiguous()
UpperCamelCase_ = attn_mask[..., 1:].contiguous()
UpperCamelCase_ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_SCREAMING_SNAKE_CASE )}
| 369 |
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = "MobileNetV1Config"
# Base docstring
lowercase_ = "google/mobilenet_v1_1.0_224"
lowercase_ = [1, 1_0_2_4, 7, 7]
# Image classification docstring
lowercase_ = "google/mobilenet_v1_1.0_224"
lowercase_ = "tabby, tabby cat"
lowercase_ = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int]=None ) -> int:
__a = {}
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = model.mobilenet_va
else:
__a = model
__a = '''MobilenetV1/Conv2d_0/'''
__a = backbone.conv_stem.convolution.weight
__a = backbone.conv_stem.normalization.bias
__a = backbone.conv_stem.normalization.weight
__a = backbone.conv_stem.normalization.running_mean
__a = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a = i + 1
__a = i * 2
__a = backbone.layer[pt_index]
__a = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
__a = pointer.convolution.weight
__a = pointer.normalization.bias
__a = pointer.normalization.weight
__a = pointer.normalization.running_mean
__a = pointer.normalization.running_var
__a = backbone.layer[pt_index + 1]
__a = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
__a = pointer.convolution.weight
__a = pointer.normalization.bias
__a = pointer.normalization.weight
__a = pointer.normalization.running_mean
__a = pointer.normalization.running_var
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__a = model.classifier.weight
__a = model.classifier.bias
return tf_to_pt_map
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ) -> Tuple:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__a = tf.train.list_variables(lowerCAmelCase__ )
__a = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
__a = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
__a = array
# Build TF to PyTorch weights loading map
__a = _build_tf_to_pytorch_map(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
__a = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__a = np.transpose(lowerCAmelCase__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__a = array.squeeze().transpose()
else:
__a = np.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
__a = torch.from_numpy(lowerCAmelCase__ )
tf_weights.pop(lowerCAmelCase__ , lowerCAmelCase__ )
tf_weights.pop(name + '''/RMSProp''' , lowerCAmelCase__ )
tf_weights.pop(name + '''/RMSProp_1''' , lowerCAmelCase__ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , lowerCAmelCase__ )
logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def lowercase ( lowerCAmelCase__ : torch.Tensor , lowerCAmelCase__ : nn.Convad ) -> torch.Tensor:
__a , __a = features.shape[-2:]
__a , __a = conv_layer.stride
__a , __a = conv_layer.kernel_size
if in_height % stride_height == 0:
__a = max(kernel_height - stride_height , 0 )
else:
__a = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a = max(kernel_width - stride_width , 0 )
else:
__a = max(kernel_width - (in_width % stride_width) , 0 )
__a = pad_along_width // 2
__a = pad_along_width - pad_left
__a = pad_along_height // 2
__a = pad_along_height - pad_top
__a = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCAmelCase__ , lowerCAmelCase__ , '''constant''' , 0.0 )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a = 1 , _a = 1 , _a = False , _a = True , _a = True , ):
super().__init__()
__a = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
__a = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , stride=_a , padding=_a , groups=_a , bias=_a , padding_mode='''zeros''' , )
if use_normalization:
__a = nn.BatchNormad(
num_features=_a , eps=config.layer_norm_eps , momentum=0.9997 , affine=_a , track_running_stats=_a , )
else:
__a = None
if use_activation:
if isinstance(_a , _a ):
__a = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _a ):
__a = ACTaFN[config.hidden_act]
else:
__a = config.hidden_act
else:
__a = None
def __UpperCAmelCase ( self , _a ):
if self.config.tf_padding:
__a = apply_tf_padding(_a , self.convolution )
__a = self.convolution(_a )
if self.normalization is not None:
__a = self.normalization(_a )
if self.activation is not None:
__a = self.activation(_a )
return features
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = MobileNetVaConfig
__UpperCAmelCase : Optional[int] = load_tf_weights_in_mobilenet_va
__UpperCAmelCase : Optional[Any] = 'mobilenet_v1'
__UpperCAmelCase : Tuple = 'pixel_values'
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowercase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = True ):
super().__init__(_a )
__a = config
__a = 32
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a = MobileNetVaConvLayer(
_a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
__a = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a = nn.ModuleList()
for i in range(13 ):
__a = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
__a = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , ):
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__a = self.conv_stem(_a )
__a = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a = layer_module(_a )
if output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = hidden_states
if self.pooler is not None:
__a = torch.flatten(self.pooler(_a ) , start_dim=1 )
else:
__a = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = config.num_labels
__a = MobileNetVaModel(_a )
__a = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
__a = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(self.dropout(_a ) )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = '''single_label_classification'''
else:
__a = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(_a , _a )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
| 45 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : List[str]
__UpperCAmelCase : Optional[str] = None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] = "dict"
__UpperCAmelCase : ClassVar[Any] = None
__UpperCAmelCase : str = field(default='Translation' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __UpperCAmelCase ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[List] = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[str] = None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] = "dict"
__UpperCAmelCase : ClassVar[Any] = None
__UpperCAmelCase : str = field(default='TranslationVariableLanguages' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self ):
__a = sorted(set(self.languages ) ) if self.languages else None
__a = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __UpperCAmelCase ( self , _a ):
__a = set(self.languages )
if self.languages and set(_a ) - lang_set:
raise ValueError(
f'''Some languages in example ({', '.join(sorted(set(_a ) - lang_set ) )}) are not in valid set ({', '.join(_a )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__a = []
for lang, text in translation_dict.items():
if isinstance(_a , _a ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__a , __a = zip(*sorted(_a ) )
return {"language": languages, "translation": translations}
def __UpperCAmelCase ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 45 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : Optional[int] = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCamelCase_ : bool = field(
default=_a, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
}, )
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : str = field(
default=_a, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase_ : str = field(
default=_a, metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowerCamelCase_ : Optional[bool] = field(
default=_a, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
lowerCamelCase_ : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ : List[Any] = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
datasets.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
snake_case_ : Union[str, Any] = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ : str = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Optional[int] = train_dataset.features['''label'''].names
if training_args.do_eval:
snake_case_ : Dict = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Tuple = eval_dataset.features['''label'''].names
if training_args.do_predict:
snake_case_ : int = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Optional[int] = predict_dataset.features['''label'''].names
# Labels
snake_case_ : int = len(_UpperCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCamelCase , idalabel={str(_UpperCamelCase ): label for i, label in enumerate(_UpperCamelCase )} , labelaid={label: i for i, label in enumerate(_UpperCamelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
snake_case_ : Dict = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case_ : str = False
def preprocess_function(_UpperCamelCase ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=_UpperCamelCase , max_length=data_args.max_seq_length , truncation=_UpperCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ : List[Any] = min(len(_UpperCamelCase ) , data_args.max_train_samples )
snake_case_ : int = train_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
snake_case_ : Optional[int] = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_UpperCamelCase ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ : List[str] = min(len(_UpperCamelCase ) , data_args.max_eval_samples )
snake_case_ : List[str] = eval_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
snake_case_ : List[str] = eval_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
snake_case_ : Union[str, Any] = min(len(_UpperCamelCase ) , data_args.max_predict_samples )
snake_case_ : Dict = predict_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
snake_case_ : List[str] = predict_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
snake_case_ : int = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase ):
snake_case_ : List[str] = p.predictions[0] if isinstance(p.predictions , _UpperCamelCase ) else p.predictions
snake_case_ : Tuple = np.argmax(_UpperCamelCase , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ : Optional[int] = default_data_collator
elif training_args.fpaa:
snake_case_ : Any = DataCollatorWithPadding(_UpperCamelCase , pad_to_multiple_of=8 )
else:
snake_case_ : Any = None
# Initialize our Trainer
snake_case_ : Any = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
snake_case_ : int = None
if training_args.resume_from_checkpoint is not None:
snake_case_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ : Dict = last_checkpoint
snake_case_ : int = trainer.train(resume_from_checkpoint=_UpperCamelCase )
snake_case_ : Union[str, Any] = train_result.metrics
snake_case_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Dict = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _UpperCamelCase )
trainer.save_metrics('''train''' , _UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ : Any = trainer.evaluate(eval_dataset=_UpperCamelCase )
snake_case_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase )
snake_case_ : str = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''eval''' , _UpperCamelCase )
trainer.save_metrics('''eval''' , _UpperCamelCase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
snake_case_ , snake_case_ , snake_case_ : Optional[int] = trainer.predict(_UpperCamelCase , metric_key_prefix='''predict''' )
snake_case_ : Union[str, Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Optional[int] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''predict''' , _UpperCamelCase )
trainer.save_metrics('''predict''' , _UpperCamelCase )
snake_case_ : List[Any] = np.argmax(_UpperCamelCase , axis=1 )
snake_case_ : Optional[Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_UpperCamelCase ):
snake_case_ : List[str] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 279 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
if os.path.exists(_UpperCamelCase ):
if os.path.exists(os.path.join(_UpperCamelCase , '''config.json''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''config.json''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''config.json''' ) )
if os.path.exists(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) )
else:
os.makedirs(_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = 2
if unlogit:
snake_case_ : Any = torch.pow(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[Any] = p * torch.log(_UpperCamelCase )
snake_case_ : Dict = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(_UpperCamelCase ) ) ) )
for row in range(len(_UpperCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ : int = model.config.num_hidden_layers, model.config.num_attention_heads
snake_case_ : int = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
snake_case_ : Optional[int] = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
if head_mask is None:
snake_case_ : Tuple = torch.ones(_UpperCamelCase , _UpperCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_UpperCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
snake_case_ : Dict = None
snake_case_ : Tuple = 0.0
snake_case_ : Dict = 0.0
for step, inputs in enumerate(tqdm(_UpperCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
snake_case_ : Any = tuple(t.to(args.device ) for t in inputs )
((snake_case_) , ) : Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
snake_case_ : List[str] = model(_UpperCamelCase , labels=_UpperCamelCase , head_mask=_UpperCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
snake_case_ , snake_case_ , snake_case_ : int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_UpperCamelCase ):
snake_case_ : Dict = entropy(attn.detach() , _UpperCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_UpperCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
snake_case_ : Union[str, Any] = 2
snake_case_ : Any = torch.pow(torch.pow(_UpperCamelCase , _UpperCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
snake_case_ : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_UpperCamelCase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_UpperCamelCase )
logger.info('''Head ranked by importance scores''' )
snake_case_ : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
snake_case_ : Union[str, Any] = torch.arange(
head_importance.numel() , device=args.device )
snake_case_ : Dict = head_ranks.view_as(_UpperCamelCase )
print_ad_tensor(_UpperCamelCase )
return attn_entropy, head_importance, total_loss
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ : Optional[int] = compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase )
snake_case_ : Any = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , _UpperCamelCase , original_score * args.masking_threshold )
snake_case_ : Any = torch.ones_like(_UpperCamelCase )
snake_case_ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
snake_case_ : List[Any] = original_score
while current_score >= original_score * args.masking_threshold:
snake_case_ : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
snake_case_ : Optional[Any] = float('''Inf''' )
snake_case_ : List[Any] = head_importance.view(-1 ).sort()[1]
if len(_UpperCamelCase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
snake_case_ : Optional[int] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
snake_case_ : Optional[Any] = new_head_mask.view(-1 )
snake_case_ : int = 0.0
snake_case_ : List[Any] = new_head_mask.view_as(_UpperCamelCase )
snake_case_ : List[str] = new_head_mask.clone().detach()
print_ad_tensor(_UpperCamelCase )
# Compute metric and head importance again
snake_case_ , snake_case_ , snake_case_ : str = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , head_mask=_UpperCamelCase )
snake_case_ : Tuple = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(_UpperCamelCase )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : str = datetime.now()
snake_case_ , snake_case_ , snake_case_ : List[Any] = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase )
snake_case_ : Union[str, Any] = 1 / loss
snake_case_ : Union[str, Any] = datetime.now() - before_time
snake_case_ : int = sum(p.numel() for p in model.parameters() )
snake_case_ : Tuple = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Any = [
v,
]
assert sum(len(_UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_UpperCamelCase )
snake_case_ : Union[str, Any] = sum(p.numel() for p in model.parameters() )
snake_case_ : Dict = datetime.now()
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase , actually_pruned=_UpperCamelCase , )
snake_case_ : Union[str, Any] = 1 / loss
snake_case_ : Optional[Any] = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _UpperCamelCase , _UpperCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _UpperCamelCase , _UpperCamelCase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(_UpperCamelCase , args.output_dir )
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=_UpperCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=_UpperCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=_UpperCamelCase , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=_UpperCamelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_UpperCamelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=_UpperCamelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 )
parser.add_argument('''--local_rank''' , type=_UpperCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
snake_case_ : Any = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
snake_case_ : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case_ : List[str] = torch.device('''cuda''' , args.local_rank )
snake_case_ : Union[str, Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
snake_case_ : int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case_ : Any = nn.parallel.DistributedDataParallel(
_UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCamelCase )
elif args.n_gpu > 1:
snake_case_ : Dict = nn.DataParallel(_UpperCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_UpperCamelCase )
torch.save(_UpperCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )
# Prepare dataset
snake_case_ : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
snake_case_ : Any = (torch.from_numpy(_UpperCamelCase ),)
snake_case_ : Any = TensorDataset(*_UpperCamelCase )
snake_case_ : List[str] = RandomSampler(_UpperCamelCase )
snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
snake_case_ : List[str] = mask_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
prune_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 279 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger("""transformers.models.encodec""")
__lowerCamelCase = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__lowerCamelCase = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__lowerCamelCase = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__lowerCamelCase = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__lowerCamelCase = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__lowerCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__lowerCamelCase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__lowerCamelCase = []
__lowerCamelCase = []
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
for attribute in key.split("." ):
snake_case : Optional[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
snake_case : str = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
snake_case : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Tuple = value
elif weight_type == "weight_g":
snake_case : Any = value
elif weight_type == "weight_v":
snake_case : int = value
elif weight_type == "bias":
snake_case : Dict = value
elif weight_type == "running_mean":
snake_case : int = value
elif weight_type == "running_var":
snake_case : List[str] = value
elif weight_type == "num_batches_tracked":
snake_case : Tuple = value
elif weight_type == "weight_ih_l0":
snake_case : Optional[Any] = value
elif weight_type == "weight_hh_l0":
snake_case : Dict = value
elif weight_type == "bias_ih_l0":
snake_case : Optional[Any] = value
elif weight_type == "bias_hh_l0":
snake_case : int = value
elif weight_type == "weight_ih_l1":
snake_case : List[str] = value
elif weight_type == "weight_hh_l1":
snake_case : int = value
elif weight_type == "bias_ih_l1":
snake_case : Optional[int] = value
elif weight_type == "bias_hh_l1":
snake_case : Optional[int] = value
else:
snake_case : List[Any] = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def UpperCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case , snake_case : Tuple = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : int ):
snake_case : Union[str, Any] = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case : Any = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case : Any = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__lowerCamelCase , __lowerCamelCase ):
logger.info(f"""{name} was ignored""" )
continue
snake_case : List[Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case , snake_case : Any = key.split(".*." )
if prefix in name and suffix in name:
snake_case : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
snake_case : List[Any] = True
if "*" in mapped_key:
snake_case : Optional[int] = name.split(__lowerCamelCase )[0].split("." )[-2]
snake_case : Optional[Any] = mapped_key.replace("*" , __lowerCamelCase )
if "weight_g" in name:
snake_case : List[str] = "weight_g"
elif "weight_v" in name:
snake_case : int = "weight_v"
elif "weight_ih_l0" in name:
snake_case : Dict = "weight_ih_l0"
elif "weight_hh_l0" in name:
snake_case : Optional[Any] = "weight_hh_l0"
elif "bias_ih_l0" in name:
snake_case : Optional[Any] = "bias_ih_l0"
elif "bias_hh_l0" in name:
snake_case : int = "bias_hh_l0"
elif "weight_ih_l1" in name:
snake_case : Union[str, Any] = "weight_ih_l1"
elif "weight_hh_l1" in name:
snake_case : List[str] = "weight_hh_l1"
elif "bias_ih_l1" in name:
snake_case : Optional[int] = "bias_ih_l1"
elif "bias_hh_l1" in name:
snake_case : Tuple = "bias_hh_l1"
elif "bias" in name:
snake_case : Tuple = "bias"
elif "weight" in name:
snake_case : List[str] = "weight"
elif "running_mean" in name:
snake_case : str = "running_mean"
elif "running_var" in name:
snake_case : Optional[int] = "running_var"
elif "num_batches_tracked" in name:
snake_case : int = "num_batches_tracked"
else:
snake_case : List[Any] = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None , ):
if config_path is not None:
snake_case : Optional[Any] = EncodecConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : Optional[Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case : List[Any] = [8, 5, 4, 4]
snake_case : Optional[int] = [2.2]
snake_case : Optional[int] = 64
snake_case : Any = 32000
snake_case : Dict = 2048
snake_case : Union[str, Any] = False
snake_case : Union[str, Any] = False
snake_case : Union[str, Any] = False
elif model_name == "encodec_48khz":
snake_case : List[str] = [8, 5, 4, 2]
snake_case : Optional[Any] = [3.0, 6.0, 12.0, 24.0]
snake_case : List[str] = 48000
snake_case : Optional[Any] = 2
snake_case : Optional[int] = False
snake_case : Dict = "time_group_norm"
snake_case : Union[str, Any] = True
snake_case : Union[str, Any] = 1.0
snake_case : List[str] = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
snake_case : List[Any] = EncodecModel(__lowerCamelCase )
snake_case : Optional[int] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__lowerCamelCase )
snake_case : Tuple = torch.load(__lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case : Any = original_checkpoint["best_state"]
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(__lowerCamelCase )
model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 59 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
_SCREAMING_SNAKE_CASE = Features({'text': Value('string' )} )
_SCREAMING_SNAKE_CASE = Features({} )
_SCREAMING_SNAKE_CASE = "text"
@property
def _snake_case ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 46 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Tuple = 16
UpperCAmelCase : Optional[int] = 32
def _A ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
a__ : Tuple =AutoTokenizer.from_pretrained("bert-base-cased" )
a__ : int =load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
a__ : Any =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : Union[str, Any] =datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ : Any =tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : List[Any] =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : Tuple =16
elif accelerator.mixed_precision != "no":
a__ : str =8
else:
a__ : List[Any] =None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding="longest" , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
a__ : Tuple =DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
a__ : Any =DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase : str = mocked_dataloaders # noqa: F811
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE ) == "1":
a__ : Optional[Any] =2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
a__ : Optional[int] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
a__ : Union[str, Any] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : Optional[int] =config["lr"]
a__ : Tuple =int(config["num_epochs"] )
a__ : Optional[Any] =int(config["seed"] )
a__ : Optional[Any] =int(config["batch_size"] )
set_seed(SCREAMING_SNAKE_CASE )
a__ , a__ : List[Any] =get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : Dict =evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
a__ : Optional[int] =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a__ : List[Any] =batch_size // MAX_GPU_BATCH_SIZE
a__ : List[Any] =MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : List[str] =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ : List[Any] =model.to(accelerator.device )
# Instantiate optimizer
a__ : List[str] =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
a__ : Dict =get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : List[str] =accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
a__ : Any =os.path.split(SCREAMING_SNAKE_CASE )[-1].split("." )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
a__ : List[Any] =0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a__ : List[Any] =model(**SCREAMING_SNAKE_CASE )
a__ : Tuple =outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
a__ : Dict =loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
a__ : Optional[Any] =model(**SCREAMING_SNAKE_CASE )
a__ : int =outputs.logits.argmax(dim=-1 )
a__ , a__ : int =accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
a__ : Optional[Any] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(SCREAMING_SNAKE_CASE ),
"epoch": epoch,
} , step=SCREAMING_SNAKE_CASE , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _A ( ):
"""simple docstring"""
a__ : Dict =argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=SCREAMING_SNAKE_CASE , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
a__ : List[str] =parser.parse_args()
a__ : Optional[Any] ={"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 148 |
from __future__ import annotations
import math
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if num <= 0:
a__ : List[str] =f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] =[True] * (num + 1)
a__ : Union[str, Any] =[]
a__ : str =2
a__ : Any =int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
a__ : Optional[int] =False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 148 | 1 |
"""simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase__ : List[str] =logging.get_logger(__name__)
class __A ( a ):
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 262 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( a , unittest.TestCase ):
__A = BioGptTokenizer
__A = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCamelCase =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCamelCase =["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def _snake_case ( self , UpperCAmelCase_ ):
lowerCamelCase ="""lower newer"""
lowerCamelCase ="""lower newer"""
return input_text, output_text
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase ="""lower"""
lowerCamelCase =["""low""", """er</w>"""]
lowerCamelCase =tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =tokens + ["""<unk>"""]
lowerCamelCase =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
@slow
def _snake_case ( self ):
lowerCamelCase =BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCamelCase =tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 262 | 1 |
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Tuple ):
A_ = data
A_ = None
def __repr__( self : Union[str, Any] ):
return f'''Node({self.data})'''
class _a :
"""simple docstring"""
def __init__( self : Optional[Any] ):
A_ = None
def __iter__( self : List[Any] ):
A_ = self.head
while node:
yield node.data
A_ = node.next
def __len__( self : List[str] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] )
def __getitem__( self : Dict , UpperCAmelCase : Dict ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
A_ = self.head
for _ in range(SCREAMING_SNAKE_CASE_ ):
A_ = current.next
A_ = data
def __A ( self : str , UpperCAmelCase : Union[str, Any] ):
self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ )
def __A ( self : Dict , UpperCAmelCase : str ):
self.insert_nth(0 , SCREAMING_SNAKE_CASE_ )
def __A ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ):
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
A_ = Node(SCREAMING_SNAKE_CASE_ )
if self.head is None:
A_ = new_node
elif index == 0:
A_ = self.head # link new_node to head
A_ = new_node
else:
A_ = self.head
for _ in range(index - 1 ):
A_ = temp.next
A_ = temp.next
A_ = new_node
def __A ( self : Dict ): # print every node data
print(self )
def __A ( self : str ):
return self.delete_nth(0 )
def __A ( self : Union[str, Any] ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __A ( self : Any , UpperCAmelCase : Dict = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
A_ = self.head # default first node
if index == 0:
A_ = self.head.next
else:
A_ = self.head
for _ in range(index - 1 ):
A_ = temp.next
A_ = temp.next
A_ = temp.next.next
return delete_node.data
def __A ( self : Optional[Any] ):
return self.head is None
def __A ( self : Tuple ):
A_ = None
A_ = self.head
while current:
# Store the current node's next node.
A_ = current.next
# Make the current node's next point backwards
A_ = prev
# Make the previous node be the current node
A_ = current
# Make the current node the next node (to progress iteration)
A_ = next_node
# Return prev in order to put the head at the end
A_ = prev
def __snake_case ( ):
"""simple docstring"""
A_ = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
    except IndexError:
        assert True  # this should happen

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Tests inserting and deleting heterogeneous values."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in a specific location in the linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo of the linked list operations."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()

| 312 |
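The assertions and the interactive demo above rely on `Node` and `LinkedList` definitions that appear earlier in this row. For readers landing here without that context, the following is a minimal, self-contained sketch — reconstructed from the operations the tests exercise, not the verbatim original — that satisfies every call made above:

from __future__ import annotations

from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return "->".join(str(item) for item in self)

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, item in enumerate(self):
            if i == index:
                return item

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_nth(self, index: int, data: Any) -> None:
        # index == len(self) is allowed: it appends at the tail
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:
            raise IndexError("List index out of range.")
        delete_node = self.head
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def is_empty(self) -> bool:
        return self.head is None

    def print_list(self) -> None:
        print(self)

    def reverse(self) -> None:
        # Classic three-pointer in-place reversal
        prev, current = None, self.head
        while current:
            current.next, prev, current = prev, current, current.next
        self.head = prev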
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle using only the previous row, exploiting symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators for a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 328 | 0 |
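As a quick, hand-checkable sanity test of the two generators above (a sketch, not part of the original module):

rows = generate_pascal_triangle(5)
assert rows == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
assert generate_pascal_triangle_optimized(5) == rows
print_pascal_triangle(5)  # prints the same five rows as a centered triangle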
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger

logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: str, output_path: str) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: str, magic_number_length: int) -> bytes:
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: str, magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: str, return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: str):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 25 |
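A hypothetical usage sketch of the module above (the paths are made up for illustration): `Extractor` first sniffs the file's magic number, then dispatches to the matching extractor.

archive_path = "/tmp/data.tar.gz"   # hypothetical input file
output_dir = "/tmp/data_extracted"  # hypothetical output directory
fmt = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
if fmt:
    Extractor.extract(archive_path, output_dir, extractor_format=fmt)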
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 25 | 1 |
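The `black_avoid_patterns` map substitutes documentation placeholders with syntactically valid stand-ins before snippets are style-checked; a small illustration (hypothetical, not from the original file):

snippet = "model = {model_class}.from_pretrained(checkpoint)"
for pattern, replacement in black_avoid_patterns.items():
    snippet = snippet.replace(pattern, replacement)
print(snippet)  # model = FakeModelClass.from_pretrained(checkpoint)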
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085",  # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using
                # ftfy transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version
                # transforms it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that a clear error is raised when loading a tokenizer saved in an older, incompatible format
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 279 |
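Outside the test harness, the toy vocabulary from `setUp` can be exercised directly. A self-contained sketch follows (the temporary file names are illustrative; the expected split is the one the test above asserts):

import json
import tempfile
from pathlib import Path

from transformers import CLIPTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>",
         "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider",
         "<unk>", "<|startoftext|>", "<|endoftext|>"]
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = Path(tmp) / "vocab.json"
    merges_file = Path(tmp) / "merges.txt"
    vocab_file.write_text(json.dumps(dict(zip(vocab, range(len(vocab))))), encoding="utf-8")
    merges_file.write_text("\n".join(merges), encoding="utf-8")
    tok = CLIPTokenizer(str(vocab_file), str(merges_file), unk_token="<unk>")
    # "lower" -> l o w e r</w>; the merges "l o" and "e r</w>" yield the subwords below
    assert tok.tokenize("lower newer") == ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]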
def _print_dist(dist, v):
    """Pretty-print the all-pairs distance matrix."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """Compute shortest distances between all vertex pairs; dist[u][w] holds the
    shortest distance from vertex u to vertex w."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 279 | 1 |
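For a non-interactive check of `floyd_warshall`, a sketch mirroring the example input documented above (edge 1->2 has weight 2, edge 2->1 has weight 1):

INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
dist, _ = floyd_warshall(example_graph, 3)  # also prints the matrix
assert dist[1][2] == 2.0 and dist[2][1] == 1.0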
from __future__ import annotations

G = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Map every reachable vertex to its parent in the breadth-first tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent mapping back from target_vertex to the source."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(G, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    try:
        print(g.shortest_path("Foo"))
    except ValueError as error:
        print(error)  # "Foo" is unreachable, so the missing path is reported
| 354 |
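A quick check of the class above from a different source vertex (a sketch; `G` is the adjacency list defined at the top of the module):

g = Graph(G, "A")
g.breadth_first_search()
assert g.shortest_path("A") == "A"
assert g.shortest_path("D") == "A->B->D"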
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if item is in the sorted list a_list, else False."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 113 | 0 |
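And a non-interactive sanity check of `binary_search` (the driver above reads the same data from stdin):

assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False
assert binary_search([], 1) is False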