code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3, 5).

    The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...  Uses the classic
    three-pointer merge: each pointer tracks the next multiple of 2, 3 and 5.

    For n <= 1 the loop body never runs and the seed value 1 is returned.
    """
    ugly_nums = [1]

    # Indices into ugly_nums for the next candidate multiple of 2, 3 and 5.
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num (handles duplicates
        # such as 6 = 2*3 being generated by two pointers at once).
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    # Run the module doctests, then print the 200th ugly number.
    from doctest import testmod

    testmod(verbose=True)
    print(F"""{ugly_numbers(2_00) = }""")
| 299
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the photographic negative of *img*, modifying it in place.

    Each pixel's BGR value v becomes 255 - v.  `img` is expected to be an
    H x W x 3 integer array as returned by `cv2.imread` (numpy broadcasting
    does `[255, 255, 255] - pixel` per channel).
    """
    # getting number of pixels in the image
    rows, cols = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(rows):
        for j in range(cols):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 299
| 1
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random benchmark case: 10 ints in [-1000, 1000] and a target in [-5000, 5000]."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)
# Shared random benchmark case; `solution_times` imports it via __main__.
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search: return a sorted triplet from *arr* summing to *target*.

    Returns (0, 0, 0) when no such triplet exists.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer O(n^2) search for a triplet in *arr* summing to *target*.

    NOTE: sorts *arr* in place (same as the original).  Returns (0, 0, 0)
    when no triplet matches.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        # Scan the remainder of the sorted array with two converging pointers.
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Time both triplet-sum implementations on the shared `dataset`.

    Runs each 5 x 10000 times via `timeit.repeat` and returns the best
    (minimum) wall time of each, as (naive, two_pointer).
    """
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_1 = """
triplet_sum1(*dataset)
"""
    test_code_2 = """
triplet_sum2(*dataset)
"""
    times_1 = repeat(setup=setup_code, stmt=test_code_1, repeat=5, number=10000)
    times_2 = repeat(setup=setup_code, stmt=test_code_2, repeat=5, number=10000)
    # min() is the standard way to report a timeit result: least-noisy run.
    return (min(times_1), min(times_2))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Benchmark both implementations and report the best time of each.
    times = solution_times()
    print(F"""The time for naive implementation is {times[0]}.""")
    print(F"""The time for optimized implementation is {times[1]}.""")
| 299
|
import math
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__lowerCamelCase )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 3
while True:
SCREAMING_SNAKE_CASE_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(__lowerCamelCase )
total_partitions += 1
if check_partition_perfect(__lowerCamelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__lowerCamelCase )
integer += 1
if __name__ == "__main__":
    # Print the answer for the default proportion threshold.
    print(F"""{solution() = }""")
| 299
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (the mangled source assigned both objects to the same
# name, clobbering the logger with the dict below).
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Defaults reproduce the mangled original's positional defaults, mapped to
    parameter names by the order of the attribute assignments in `__init__`
    (matching `transformers.TrOCRConfig`).
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        # Special-token ids are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 299
|
def infix_2_postfix(infix):
    """Convert an infix expression string to postfix, printing each step.

    Operands are single alphanumeric characters; supported operators are
    ^ * / % + - and parentheses.  Returns the postfix string.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # Pop while the top has >= priority.  BUG FIX: also stop at
                # "(" — it is not in `priority`, so looking it up raised
                # KeyError for any operator following an open parenthesis.
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert an infix expression to prefix.

    Standard trick: reverse the input, swap parentheses, convert to postfix,
    then reverse the result.
    """
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on Infix, return reverse of Postfix
    return (infix_2_postfix("".join(infix)))[::-1]
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 1
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """Dummy config used to exercise dynamic registration with the auto classes."""

    model_type = "new-model"
if is_tf_available():

    class TFNewModel(TFBertModel):
        """TF model paired with ``NewModelConfig`` for the auto-API registration tests."""

        config_class = NewModelConfig
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration tests for the TF auto classes (``TFAutoModel`` & friends).

    NOTE(review): identifiers in this block were machine-mangled — every test
    method is defined with the same name ``_UpperCamelCase`` (so later
    definitions shadow earlier ones in the class namespace),
    ``SCREAMING_SNAKE_CASE_`` re-binds a single local, and ``_A`` stands in
    for the real arguments / expected classes.  The structure mirrors
    transformers' ``test_modeling_tf_auto.py``; real names must be restored
    before any individual test can run.
    """

    @slow
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Round-trip: checkpoint name -> AutoConfig -> TFAutoModel.
        SCREAMING_SNAKE_CASE_ = '''bert-base-cased'''
        SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )
        SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> List[str]:
        # Same round-trip through the pre-training head.
        SCREAMING_SNAKE_CASE_ = '''bert-base-cased'''
        SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )
        SCREAMING_SNAKE_CASE_ = TFAutoModelForPreTraining.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        # Causal-LM head; also checks loading-info round trip.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> Any:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForMaskedLM.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> str:
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForSequenceClassification.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForQuestionAnswering.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    @require_tensorflow_probability
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Table-QA requires tensorflow_probability, hence the extra decorator.
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            SCREAMING_SNAKE_CASE_ = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
                _A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    def _UpperCamelCase ( self ) -> List[Any]:
        # Parameter-count sanity check on a tiny checkpoint (presumably
        # SMALL_MODEL_IDENTIFIER before mangling — TODO confirm).
        SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=_A ) , 14410 )

    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = TFAutoModelWithLMHead.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=_A ) , 14410 )

    def _UpperCamelCase ( self ) -> int:
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(_A , _A )
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(model.config )
        SCREAMING_SNAKE_CASE_ = ['''FunnelBaseModel''']
        SCREAMING_SNAKE_CASE_ = TFAutoModel.from_config(_A )
        self.assertIsInstance(_A , _A )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained(_A )
            self.assertIsInstance(_A , _A )

    def _UpperCamelCase ( self ) -> int:
        # Dynamic registration of a new config/model pair with every auto class;
        # the finally-block undoes the registration so other tests stay clean.
        try:
            AutoConfig.register('''new-model''' , _A )
            SCREAMING_SNAKE_CASE_ = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(_A ):
                        auto_class.register(_A , _A )
                    auto_class.register(_A , _A )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(_A ):
                        auto_class.register(_A , _A )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    SCREAMING_SNAKE_CASE_ = BertModelTester(self ).get_config()
                    SCREAMING_SNAKE_CASE_ = NewModelConfig(**tiny_config.to_dict() )
                    SCREAMING_SNAKE_CASE_ = auto_class.from_config(_A )
                    self.assertIsInstance(_A , _A )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(_A )
                        SCREAMING_SNAKE_CASE_ = auto_class.from_pretrained(_A )
                        self.assertIsInstance(_A , _A )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def _UpperCamelCase ( self ) -> Any:
        # The next four tests pin the error messages for bad identifiers /
        # revisions / missing weight files / PT-only checkpoints.
        with self.assertRaisesRegex(
            _A , '''bert-base is not a local folder and is not a valid model identifier''' ):
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''bert-base''' )

    def _UpperCamelCase ( self ) -> List[str]:
        with self.assertRaisesRegex(
            _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' )

    def _UpperCamelCase ( self ) -> str:
        with self.assertRaisesRegex(
            _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def _UpperCamelCase ( self ) -> List[str]:
        with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ):
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def _UpperCamelCase ( self ) -> Any:
        # A cached model should only trigger a single HEAD request on reload.
        # Make sure we have cached the model.
        SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 299
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger; the feature extractor's __call__ warns through it.
logger = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP-style audio feature extractor: log-mel spectrograms with
    "fusion"/"rand_trunc" truncation and "repeat"/"repeatpad" padding.

    NOTE(review): identifiers were machine-mangled (``_A`` parameters,
    ``SCREAMING_SNAKE_CASE_`` attribute/local bindings, base class
    ``__SCREAMING_SNAKE_CASE``).  The structure mirrors transformers'
    ``ClapFeatureExtractor``; the real attribute names can be read off the
    right-hand sides below, but must be restored before this class runs.
    """

    # Keys produced by __call__ (model input names).
    UpperCAmelCase_ =["input_features", "is_longer"]

    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a real FFT: window_size // 2 + 1.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        # Maximum number of samples = seconds * sampling rate.
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks are kept: HTK-scaled and Slaney-scaled.
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )

    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        # Serializable view of the extractor state; the large filter-bank
        # arrays are dropped so they are not written to disk.
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        # Power spectrogram (power=2.0) mapped through the given mel filters,
        # in dB; transposed so frames are the leading axis.
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        # "Fusion" input: one random chunk from the front/middle/back third of
        # the mel spectrogram plus a bilinear-shrunk copy of the whole thing.
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        # Truncate (or pad) one waveform and convert it to mel features.
        # Returns (input_mel, longer) where `longer` flags audio above max_length.
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        # Main entry point: normalize the raw speech to a batch of float32
        # arrays, extract mel features per waveform, and wrap in BatchFeature.
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
| 1
|
def solution(limit: int = 1_000_000) -> int:
    """Return sum of Euler's totient phi(k) for 2 <= k <= limit.

    Sieve: start with phi[i] = i - 1; whenever phi[i] is still i - 1, i is
    prime, so subtract the 1/i share from every multiple of i.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime (untouched by any smaller prime)
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
    # Print the totient sum for the default limit of 1_000_000.
    print(solution())
| 299
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid; with deriv=True, its derivative expressed in terms
    of an already-activated *value*: s * (1 - s)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value (learning-rate-like scale used in forward_propagation)
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single sigmoid neuron toward expected/100 and return its
    final output scaled back to [0, 100].

    The initial weight is a random odd integer in [1, 199] cast to float.
    """
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 1
|
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training records into an evaluation-set file (one
    question per line) and a gold-data file (tab-joined positive titles).
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    # CLI entry point: see `main` for arguments.
    main()
| 299
|
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate

# Emit the deprecation warning once at import time, pointing callers at the
# new module path.  stacklevel=3 attributes the warning to the importer.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 299
| 1
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__UpperCAmelCase = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
# English letters ordered by descending frequency, and the plain alphabet.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in *message* (case-insensitive);
    non-letter characters are ignored.  Every letter appears as a key."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x):
    """Key function for sorting: return the first element of *x*."""
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered by their frequency in *message*.

    Ties are broken by reverse ETAOIN order so the resulting string can be
    compared against ETAOIN for an English-ness score.
    """
    letter_to_freq = get_letter_count(message)
    # Group letters by their frequency count.
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    # Within one frequency bucket, sort by reverse ETAOIN position.
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    # Highest frequency first.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_letters = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_letters)
def english_freq_match_score(message: str) -> int:
    """Score (0-12) how English-like *message*'s letter frequencies are.

    One point for each of the six most common English letters found among
    the message's six most frequent letters, plus one for each of the six
    least common letters found among the message's six least frequent.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module doctests.
    import doctest

    doctest.testmod()
| 299
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens with fixed (non-learned) position
    embeddings, used by the spectrogram diffusion pipeline.

    NOTE(review): parameter names restored from the assignment order of the
    mangled original; confirm against the registered config of existing
    checkpoints before changing defaults.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Position table is frozen: it is set once and never trained.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 299
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens: token + learned position embeddings,
    a stack of ``T5Block`` layers, then a final layer norm and dropout.

    Restored from mangled code: the base classes are the three mixins imported
    above, ``__init__`` parameters had collapsed to duplicate ``_A`` names
    (a SyntaxError), and the sub-modules read by ``forward`` were being bound
    to locals instead of ``self`` attributes.
    """

    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Learned absolute position table, frozen so positions stay fixed.
        # NOTE(review): the bare `= False` in the mangled source is restored as
        # freezing the position table — confirm against the upstream encoder.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode a batch of token ids; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 299
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__(ProcessorMixin):
    """Processor pairing a Wav2Vec2 feature extractor (audio) with a CTC tokenizer (text).

    Restored from mangled code: the two ProcessorMixin class attributes had
    collapsed onto one name, parameters were duplicate ``_A``/``*_A`` names
    (SyntaxErrors), and all four helper methods shared the name
    ``_UpperCamelCase`` so only the last survived.
    """

    # Names ProcessorMixin uses to instantiate the two sub-processors.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Processor used by __call__/pad; swapped by as_target_processor().
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            # Legacy checkpoints without a `tokenizer_class` attribute: build the
            # two components explicitly and warn about the deprecation.
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Route `audio` to the feature extractor and/or `text` to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad audio `input_features` and/or tokenized `labels` to batch tensors."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route __call__/pad to the tokenizer for labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 299
| 1
|
import qiskit
def quantum_entanglement(qubits = 2):
    """Prepare a GHZ (fully entangled) state on `qubits` qubits and return the
    measurement counts over 1000 simulator shots.

    Restored from mangled code (locals were unbound); the def is renamed to
    `quantum_entanglement`, the name the __main__ guard already calls.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Bug fix: the CNOT target must be qubit `i`, not the qubit count
        # (the count is an out-of-range index on the register).
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


A__ = quantum_entanglement  # backward-compatible alias for the mangled name

if __name__ == "__main__":
    print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 299
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary line into ``(n_failed, n_passed, time_spent)``.

    Restored from mangled code; the def is renamed to `handle_test_results`,
    the name the __main__ block already calls.
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


A__ = handle_test_results  # backward-compatible alias for the mangled name
def extract_first_line_failure(failures_short_lines):
    """Map each doctest file named in a pytest `failures_short` report to the
    first error line that follows its header.

    Restored from mangled code; the def is renamed to
    `extract_first_line_failure`, the name the __main__ block already calls.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Header line: the third token is the failing file.
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First non-numbered line after the header is the error summary.
            failures[file] = line
            in_error = False

    return failures


A__ = extract_first_line_failure  # backward-compatible alias for the mangled name
class Message:
    """Builds and posts the Slack report for a doc-test CI run.

    Restored from mangled code: all properties/methods shared the name
    ``_UpperCamelCase`` (clobbering each other) and ``__init__`` had duplicate
    ``_A`` parameters; names are restored to match the call sites in
    ``__main__`` and within the class itself.
    """

    def __init__(self, title, doc_test_results):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self):
        """Total wall-clock time of the run formatted as XhYmZs."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self):
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self):
        """One mrkdwn section listing the failed tests grouped by doc category."""
        line_length = 40
        # NOTE: reads the module-level `doc_test_results` mapping built in __main__.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self):
        """Full Slack `blocks` payload for the top-level message, as a JSON string."""
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic 'tests did not run' message to the daily channel."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # NOTE(review): json.loads over a list mirrors the mangled original but
        # looks wrong (json.loads expects a string) — confirm intended argument.
        print(json.dumps({"blocks": json.loads(payload)}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the top-level report and remember the thread for replies."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details."""
        failures_text = ""
        for key, value in failures.items():
            # Truncate very long error messages so Slack accepts the payload.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per job that had failures."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                # Avoid hitting Slack rate limits.
                time.sleep(1)


UpperCamelCase__ = Message  # backward-compatible alias for the mangled name
def get_job_links():
    """Map each GitHub Actions job name to its HTML URL for the current run.

    Restored from mangled code; the def is renamed to `get_job_links`, the
    name the __main__ block already calls. Best-effort: returns {} on error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # First page already fetched 100 jobs; page through the remainder.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


A__ = get_job_links  # backward-compatible alias for the mangled name
def retrieve_artifact(name):
    """Read every file in artifact directory `name` into a dict keyed by the
    file's stem; returns {} when the directory does not exist.

    Restored from mangled code; the def is renamed to `retrieve_artifact`,
    the name the __main__ block already calls.
    """
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


A__ = retrieve_artifact  # backward-compatible alias for the mangled name
def retrieve_available_artifacts():
    """Scan the current working directory for artifact directories and return
    a mapping {artifact_name: Artifact}.

    Restored from mangled code; the def is renamed to
    `retrieve_available_artifacts`, the name the __main__ block already calls.
    """

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


A__ = retrieve_available_artifacts  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # Restored from mangled code: this block already referenced the original
    # helper names (get_job_links, handle_test_results, Message, ...) while
    # its own assignment targets were destroyed; bindings are reinstated.
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failed"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
| 1
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    """Builds configs/inputs and runs per-head checks for the DeBERTa-v2 tests.

    Restored from mangled code: ``__init__`` had duplicate ``_A`` parameters
    (a SyntaxError) and every method was named ``_UpperCamelCase``; the class
    is renamed to `DebertaVaModelTester`, the name the test suite below
    already instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last output is checked.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """unittest suite wiring DebertaVaModelTester into the common model and
    pipeline test mixins.

    Restored from mangled code: every test method was named ``_UpperCamelCase``
    (so unittest could not discover them) and the class attributes the mixins
    read had all collapsed onto one name.
    """

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released deberta-v2-xlarge weights.

    Restored from mangled code: both test methods were named ``_UpperCamelCase``
    so unittest could not discover them.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 299
|
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]

__UpperCAmelCase = DIRECTIONS  # backward-compatible alias for the mangled name


def search(grid, init, goal, cost, heuristic):
    """Greedy best-first grid search from `init` to `goal`.

    Returns (path, action) where `path` is the list of [x, y] cells from init
    to goal and `action` records, per cell, the DIRECTIONS index used to enter
    it. Raises ValueError when no route exists. Restored from mangled code:
    the constant is bound as `DIRECTIONS` (the name this function reads) and
    the def is renamed to `search`, the name the __main__ block calls.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


A__ = search  # backward-compatible alias for the mangled name
if __name__ == "__main__":
    # Restored from mangled code: the block already read the original names
    # (heuristic, path, action) and called `search`, while every assignment
    # target had been destroyed; bindings are reinstated.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 299
| 1
|
import math
def check_partition_perfect(positive_integer):
    """Return True iff `positive_integer` is a "perfect" partition value, i.e.
    (sqrt(4*k + 1) + 1) / 2 is an exact power of two (its log2 is integral).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


A__ = check_partition_perfect  # backward-compatible alias for the mangled name
def solution(max_proportion = 1 / 12345):
    """Return the first partition value at which the proportion of perfect
    partitions among all candidates drops below `max_proportion`.

    Candidates are k = (n**2 - 1) / 4 for integer n >= 3 whenever that value is
    integral. The perfection test is inlined as a private helper so this
    function is self-contained (the mangled original called an unresolvable
    sibling name).
    """

    def _is_perfect(candidate):
        # A candidate is perfect when (sqrt(4k + 1) + 1) / 2 is a power of two.
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


A__ = solution  # backward-compatible alias for the mangled name

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 299
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def A__ ( matrix, vector ):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian
    elimination with partial pivoting.

    Args:
        matrix: square coefficient matrix as a list of rows.
        vector: right-hand side as a column (list of 1-element rows).

    Returns:
        The solution as a column (list of 1-element rows); each entry is
        rounded to 10 decimal places to suppress float noise.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError) and read undefined locals; this restores distinct,
    meaningful names without changing the algorithm.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest |entry| in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            # Column is all zeros from here down; move to the next column.
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate entries below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution: clear entries above each pivot
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def A__ ( __lowerCamelCase ):
    """Return a polynomial function fitted exactly through the points
    (1, y[0]), (2, y[1]), ... for the given list of y-values, by solving a
    Vandermonde system with the sibling ``solve`` helper.

    NOTE(review): the original iterated ``range()`` over the input *list*
    (a TypeError) and read several undefined locals; this restores the
    intended ``size = len(points)`` bookkeeping.  ``solve`` must be defined
    earlier in this module.
    """
    size = len(__lowerCamelCase)
    # Vandermonde matrix: row i holds (i+1)**(size-1), ..., (i+1)**0.
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(__lowerCamelCase):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        # Evaluate the fitted polynomial at ``var``; coefficients are
        # rounded to the nearest integer, matching the original behaviour.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def A__ ( variable ):
    """Project Euler 101 generating function:
    u(n) = 1 - n + n**2 - n**3 + ... + n**10 (alternating signs).

    NOTE(review): the original body read the undefined name ``variable``
    while the parameter was called ``__lowerCamelCase``; the parameter is
    renamed to match the body, fixing the NameError.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def A__ ( func = question_function, order = 10 ):
    """Sum the first incorrect terms (FITs) of the optimum polynomials
    fitted through successively longer prefixes of ``func`` (Project Euler
    101 style).

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError) and read undefined locals; distinct names are restored.
    ``question_function`` and ``interpolate`` are expected to be defined
    earlier in this module.
    """
    # Ground-truth sequence values u(1) .. u(order).
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    # OP(k): polynomial fitted through the first k data points.
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Walk forward until the fitted polynomial first disagrees with func.
        while func(x_val) == poly(x_val):
            x_val += 1
        # Accumulate the first incorrect term of this polynomial.
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 299
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =XLMRobertaTokenizer
UpperCAmelCase_ =XLMRobertaTokenizerFast
UpperCAmelCase_ =True
UpperCAmelCase_ =True
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Build a sentencepiece-backed tokenizer from the test fixture and
        save it to the temp dir so the common tests can reload it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): the assigned name and the name used on the next line
        # differ — looks like an automated-rename artifact; verify.
        SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer(_A , keep_accents=_A )
        tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = '''<pad>'''
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
    def _UpperCamelCase ( self ) -> Any:
        """Spot-check the fixture vocab: leading special tokens, trailing
        <mask>, and the total size (1002)."""
        # NOTE(review): assigned name vs. ``vocab_keys`` read below differ —
        # automated-rename artifact; verify against the original test.
        SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(_A ) , 1002 )
def _UpperCamelCase ( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer(_A , keep_accents=_A )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _UpperCamelCase ( self ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
    @cached_property
    def _UpperCamelCase ( self ) -> List[Any]:
        """Full pretrained tokenizer used by the slow integration tests;
        cached so it is downloaded/loaded only once per test run."""
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """The tokenizer must survive a pickle round-trip even though it
        wraps an on-disk sentencepiece model file."""
        with tempfile.NamedTemporaryFile() as f:
            # Copy the fixture model into the temp file and build from it.
            shutil.copyfile(_A , f.name )
            SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer(f.name , keep_accents=_A )
            SCREAMING_SNAKE_CASE_ = pickle.dumps(_A )
        # Deserialising after the temp file is gone must still work.
        pickle.loads(_A )
def _UpperCamelCase ( self ) -> str:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_A )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = '''Hello World!'''
SCREAMING_SNAKE_CASE_ = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
SCREAMING_SNAKE_CASE_ = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def _UpperCamelCase ( self ) -> List[str]:
# fmt: off
SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 299
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
def __init__( self , _A , **_A ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
    def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
        """Thin forward wrapper: delegate straight to the wrapped HF model."""
        return self.model(_A , **_A )
    def _UpperCamelCase ( self , _A ) -> List[Any]:
        """Decode generated token ids to whitespace-stripped strings."""
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
            _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
        # lmap applies str.strip to every decoded string.
        return lmap(str.strip , _A )
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
    @property
    def _UpperCamelCase ( self ) -> int:
        """Padding token id of the active tokenizer (used when masking loss
        and computing batch statistics)."""
        return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        """Summarization metric: ROUGE between predictions and targets."""
        return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( _A , _A ) -> Dict:
BaseTransformer.add_model_specific_args(_A , _A )
add_generic_args(_A , _A )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Fine-tuning module for translation: reuses the summarization parent's
    training loop but scores with BLEU instead of ROUGE."""

    UpperCAmelCase_ ="translation"
    UpperCAmelCase_ =["loss"]
    UpperCAmelCase_ =["bleu"]
    UpperCAmelCase_ ="bleu"

    def __init__( self , _A , **_A ) -> Optional[int]:
        super().__init__(_A , **_A )
        # NOTE(review): the body reads ``hparams`` while the parameter is
        # ``_A`` — automated-rename artifact; verify against the original.
        SCREAMING_SNAKE_CASE_ = hparams.src_lang
        SCREAMING_SNAKE_CASE_ = hparams.tgt_lang

    def _UpperCamelCase ( self , _A , _A ) -> dict:
        """Translation metric: BLEU between predictions and targets."""
        return calculate_bleu(_A , _A )
def A__ ( args, model=None ):
    """Entry point: build the Lightning module, pick a logger, run training,
    and (optionally) reload the last checkpoint and run prediction.

    Args:
        args: parsed argparse namespace of training options.
        model: optional pre-built module; built from ``args.task`` when None.

    Returns:
        The trained Lightning module.

    NOTE(review): the original declared two parameters with the same name
    (a SyntaxError) while the body consistently read ``args``/``model``;
    coherent names are restored without changing the logic.
    ``SummarizationModule``/``TranslationModule`` are expected to be the
    classes defined earlier in this module.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('''/tmp''')
        or str(args.output_dir).startswith('''/var''')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('''WANDB_PROJECT''', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''')
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''')
    if not args.do_predict:
        return model
    # Pick up the most recent checkpoint for testing, if any was saved.
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt'''), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    # NOTE(review): ``SummarizationModule`` must resolve to the training
    # module class defined earlier in this file; verify the name mapping.
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    A__(args)
| 299
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Configuration class for DeiT models: stores the transformer
    hyper-parameters plus the ViT-style image/patch settings."""

    # model_type identifier used by the auto-config machinery
    UpperCAmelCase_ ="deit"

    def __init__( self , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1E-12 , _A=224 , _A=16 , _A=3 , _A=True , _A=16 , **_A , ) -> Dict:
        """Store every hyper-parameter on the instance; remaining kwargs go
        to the base config class via ``super().__init__``.

        Positional order (from the defaults): hidden_size,
        num_hidden_layers, num_attention_heads, intermediate_size,
        hidden_act, hidden_dropout_prob, attention_probs_dropout_prob,
        initializer_range, layer_norm_eps, image_size, patch_size,
        num_channels, qkv_bias, encoder_stride.

        NOTE(review): the assignments below read descriptive names while
        every parameter is called ``_A`` — automated-rename artifact;
        verify against the original configuration file.
        """
        super().__init__(**_A )
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = layer_norm_eps
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = patch_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = qkv_bias
        SCREAMING_SNAKE_CASE_ = encoder_stride
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for DeiT."""

    # Tooling version this export config was written against.
    UpperCAmelCase_ =version.parse("1.11" )

    @property
    def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Describe the model inputs: a single NCHW ``pixel_values`` tensor
        with every axis exported as dynamic."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def _UpperCamelCase ( self ) -> float:
        """Absolute tolerance used when validating the ONNX export against
        the reference model outputs."""
        return 1E-4
| 299
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Declares the lazily-importable public surface of the layoutlmv2 subpackage.
# NOTE(review): upstream `transformers` names this dict `_import_structure` and
# the optional-dependency branches *extend* it; here every branch rebinds the
# single module-level name `__UpperCAmelCase`, clobbering the previous value,
# and `_import_structure` referenced at the bottom is never defined — flag for
# repair against the un-obfuscated upstream `__init__.py`.
__UpperCAmelCase = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The fast tokenizer needs the optional `tokenizers` package; skip if absent.
    pass
else:
    __UpperCAmelCase = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Image processing needs the optional vision stack (PIL); skip if absent.
    pass
else:
    __UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
    __UpperCAmelCase = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling classes require PyTorch; skip if absent.
    pass
else:
    __UpperCAmelCase = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves attributes on first access instead.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns the lazy module to `sys.modules[__name__]`;
    # binding it to a throwaway name leaves the package eagerly unusable.
    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 1
|
def A__ ( __lowerCamelCase ):
    """Upper-case ASCII letters 'a'..'z' in *word*; leave all other characters
    (including already-uppercase letters and punctuation) untouched."""
    converted = []
    for char in __lowerCamelCase:
        # Subtracting 32 from an ASCII lowercase code point yields its uppercase.
        converted.append(chr(ord(char) - 32) if "a" <= char <= "z" else char)
    return "".join(converted)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 299
|
import functools
def A__ ( days, costs ):
    """Minimum total cost of 1-day, 7-day and 30-day passes covering every
    travel day in *days* (LeetCode 983, "Minimum Cost For Tickets").

    :param days: list of travel days, each an int in [1, 365]
    :param costs: [1-day pass cost, 7-day pass cost, 30-day pass cost]
    :returns: minimum total cost as an int (0 for an empty days list)
    :raises ValueError: on malformed days/costs or out-of-range day values
    :raises TypeError: via functools if inputs are unhashable (not expected)

    NOTE(review): the original signature declared two duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names restored from the
    error messages in the body.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )

    days_set = set(days)  # O(1) membership tests inside the DP

    @functools.cache
    def dynamic_programming(index) -> int:
        # Cheapest cost to cover all travel days from `index` to day 365.
        if index > 3_65:
            return 0
        if index not in days_set:
            # No travel today: nothing to buy, move on.
            return dynamic_programming(index + 1)
        # Travel day: try each pass length and keep the cheapest outcome.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 299
| 1
|
import math
def A__ ( __lowerCamelCase ):
    """Trial-division primality test using the 6k +/- 1 wheel.

    Returns True for primes, False for everything else (including values
    below 2, even numbers and multiples of 3).
    """
    # 2 and 3 are the only primes below 4.
    if 1 < __lowerCamelCase < 4:
        return True
    # Negatives, 0, 1, even numbers and multiples of 3 are composite.
    if __lowerCamelCase < 2 or __lowerCamelCase % 2 == 0 or __lowerCamelCase % 3 == 0:
        return False
    # Remaining primes are of the form 6k +/- 1; test divisors up to sqrt(n).
    limit = int(math.sqrt(__lowerCamelCase) + 1)
    candidate = 5
    while candidate < limit:
        if __lowerCamelCase % candidate == 0 or __lowerCamelCase % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def _is_prime(number):
    """6k +/- 1 trial-division primality test (private helper for A__)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A__ ( __lowerCamelCase = 1_00_01 ):
    """Return the nth prime number (Project Euler problem 7; default n=10001).

    :param __lowerCamelCase: 1-based prime index, int or castable to int
    :raises TypeError: if the argument cannot be cast to int
    :raises ValueError: if the argument is < 1

    Fix: the original body called ``is_prime``, a name that is never defined
    in this file (the sibling primality test is also named ``A__``), so every
    call raised NameError. A private ``_is_prime`` helper with the identical
    algorithm is defined above instead.
    """
    try:
        nth = int(__lowerCamelCase)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes = []
    num = 2
    # Grow the prime list by trial division until it holds `nth` entries.
    while len(primes) < nth:
        if _is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined in this file — the function above
    # is named `A__` — so running this module as a script raises NameError.
    print(F"""{solution() = }""")
| 299
|
import gc
import math
import unittest

import torch

from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


# Module-level logger for these model tests.
__UpperCAmelCase = logging.get_logger(__name__)

# Pin torch RNG / cudnn settings so the numeric expectations below are stable.
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-suite tests for a tiny attention-block UNet2D configuration.

    NOTE(review): every method below is named `_UpperCamelCase`, so later
    definitions overwrite earlier ones in the class namespace, and `_A` used
    in `.to(_A )` is not bound anywhere in scope — presumably `torch_device`;
    confirm against the un-obfuscated upstream test file.
    """

    # Model class under test and the name of its primary output field.
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Dummy input: random 4x3x32x32 noise batch plus a single timestep of 10.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Init kwargs for a small two-stage UNet plus the matching dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for a 4-channel (LDM-style) UNet2D, including hub loading,
    accelerate vs. normal loading parity, and a pinned-output regression check.

    NOTE(review): as above, all methods share the name `_UpperCamelCase` (later
    defs shadow earlier ones) and `_A` is unbound — presumably `torch_device`
    in most call sites; confirm against the upstream test file.
    """

    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Dummy input: random 4x4x32x32 noise batch plus a single timestep of 10.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected input shape (channels, height, width).
        return (4, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (channels, height, width).
        return (4, 32, 32)

    def _UpperCamelCase ( self ) -> Tuple:
        # Init kwargs for a small 4-channel UNet plus the matching dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    def _UpperCamelCase ( self ) -> Tuple:
        # Loading from the hub should report no missing keys and produce output.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # Same hub-loading smoke test, but exercised on GPU.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        # Fixed-seed noise so both loading paths see identical inputs.
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
        # Accelerate-loaded and normally-loaded models must agree numerically.
        assert torch_all_close(_A , _A , rtol=1E-3 )

    def _UpperCamelCase ( self ) -> List[str]:
        # Regression test: pinned output slice for a fixed-seed forward pass.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for an NCSN++-style (Fourier time embedding, skip blocks) UNet2D,
    including hub loading and pinned-output regression checks.

    NOTE(review): as in the classes above, all methods share the name
    `_UpperCamelCase` and `_A` is unbound in most call sites (presumably
    `torch_device`); confirm against the upstream test file.
    """

    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
        # Dummy input: random 4x3xHxW noise plus one int32 timestep per sample.
        # NOTE(review): a @property normally takes no extra argument; the
        # `_A=(32, 32)` parameter here doubles as the `sizes` default — verify.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Init kwargs for a small score-based UNet with Fourier time embedding.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        # Hub loading should report no missing keys and produce output at 256px.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
        SCREAMING_SNAKE_CASE_ = noise
        SCREAMING_SNAKE_CASE_ = model(**_A )
        assert image is not None, "Make sure output is not None"

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        # Regression test: pinned output slice for an all-ones 256px input.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (256, 256)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> int:
        # Same regression idea against the small VE dummy checkpoint at 32px.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Dict:
        # not required for this model
        pass
| 299
| 1
|
import argparse

import fairseq
import torch
from torch import nn

from transformers import (
    MBartaaTokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
# Module logger for the conversion script.
__UpperCAmelCase = logging.get_logger(__name__)

# fairseq state-dict key fragment -> HF wav2vec2 attribute path; '*' is
# substituted with the encoder layer index while loading.
# NOTE(review): both constants below are bound to the same obfuscated name
# `__UpperCAmelCase` (the list clobbers the dict), and the loader functions
# read `MAPPING` / `logger`, which are never defined under these names —
# restore `MAPPING` / `TOP_LEVEL_KEYS` / `logger` when de-obfuscating.
__UpperCAmelCase = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Mapped targets that live at the top level of the HF model (not nested
# under the wav2vec2 submodule).
__UpperCAmelCase = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def A__ ( hf_pointer, key, value, full_name, weight_type ):
    """Copy *value* into the sub-module of *hf_pointer* addressed by dotted *key*.

    :param hf_pointer: root HF module to walk into
    :param key: dotted attribute path (e.g. "encoder.layers.3.attention.k_proj")
    :param value: tensor from the fairseq state dict
    :param full_name: original fairseq key, used only for logging/asserts
    :param weight_type: one of "weight"/"weight_g"/"weight_v"/"bias" or None

    NOTE(review): the original signature declared five duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names are restored from
    the identifiers used in the body's f-strings.
    """
    # Walk the dotted path down to the target module/parameter.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    # Write the tensor into the matching parameter slot.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def A__ ( fairseq_model, hf_model ):
    """Port every tensor of a fairseq wav2vec2 model into *hf_model*.

    Dispatches conv-feature-extractor tensors to ``load_conv_layer``,
    adapter/projection tensors to ``load_adapter``, and everything else
    through the MAPPING table via ``set_recursively``. Unmatched keys are
    collected and logged as a warning.

    NOTE(review): the original signature declared two duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names restored from the
    un-obfuscated upstream conversion script.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq key.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def A__ ( full_name, value, feature_extractor, unused_weights, use_group_norm ):
    """Copy one fairseq conv-feature-extractor tensor into the HF extractor.

    The fairseq key encodes ``<layer_id>.<type_id>.<param>``: type 0 is the
    convolution itself, type 2 is the layer norm (only present on layer 0
    when group norm is used). Anything else is recorded as unused.

    NOTE(review): the original signature declared five duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names restored from the
    un-obfuscated upstream conversion script.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        # type 0 -> the convolution weights/bias themselves
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 -> the layer norm attached to this conv layer
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def A__ ( full_name, value, adapter, unused_weights ):
    """Copy one fairseq adapter/projection tensor into the HF adapter module.

    Keys without "adaptor" belong to the projection (optionally its layer
    norm); "adaptor.<i>" keys address the i-th adapter conv layer. Anything
    unmatched is recorded in *unused_weights*.

    NOTE(review): the original signature declared four duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names restored from the
    un-obfuscated upstream conversion script. The original also checked
    ``isinstance(__lowerCamelCase, __lowerCamelCase)`` and logged "bias" in
    the weight branch — both fixed below.
    """
    name = full_name.split('''adaptor.''' )[-1]
    items = name.split('''.''' )

    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            # Fix: original log message said "bias" in this weight branch.
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def A__ ( __lowerCamelCase ):
    """Build a bias-free nn.Linear that shares its weight with *emb*.

    Used to tie an LM head to a decoder's token-embedding matrix: the linear
    layer's weight tensor is the embedding's weight tensor (shared storage,
    not a copy).

    Fix: the original passed the embedding module itself as the ``bias``
    argument (``bias=__lowerCamelCase`` — truthy, so a bias tensor was
    created); the canonical conversion head is bias-free.
    """
    vocab_size, emb_size = __lowerCamelCase.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    # Share storage with the embedding weights rather than copying.
    lin_layer.weight.data = __lowerCamelCase.weight.data
    return lin_layer
@torch.no_grad()
def A__ (
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Convert a fairseq wav2vec2 + mBART seq2seq checkpoint into an HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to *pytorch_dump_folder_path*.

    NOTE(review): the original signature declared eleven duplicate
    ``__lowerCamelCase`` parameters (a SyntaxError); names and order are
    restored from the keyword arguments at the ``__main__`` call site below.
    ``decoder_start_token_id`` is accepted for CLI compatibility but, as in
    the original body, ``forced_bos_token_id`` is hard-coded to 25_00_04.
    """
    # Build encoder/decoder configurations.
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    # Patch special-token/config metadata before saving.
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
# CLI for the fairseq -> HF conversion above.
# NOTE(review): `parser`, `args` and `convert_wavaveca_checkpoint` are never
# bound under these names in this obfuscated file (the parser is rebound to
# `__UpperCAmelCase` and the function is named `A__`); restore the original
# identifiers when de-obfuscating.
if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
    __UpperCAmelCase = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 299
|
def A__ ( __lowerCamelCase = 10_00 ):
    """Sum of the decimal digits of 2**power (Project Euler problem 16).

    Defaults to power=1000, for which the answer is 1366.
    """
    remaining = 2**__lowerCamelCase
    digit_sum = 0
    # Peel off one decimal digit per iteration, least significant first.
    while remaining:
        digit_sum += remaining % 10
        remaining //= 10
    return digit_sum
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined in this file — the function above
    # is named `A__` — so running this module as a script raises NameError.
    print(solution(int(str(input()).strip())))
| 299
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
    """Builds SwiftFormer configs and dummy inputs, and checks model outputs
    on behalf of the companion ModelTesterMixin test class.

    NOTE(review): reconstructed from an obfuscated original whose ``__init__``
    declared eleven duplicate ``_A`` parameters (a SyntaxError) and whose five
    methods all shared the name ``_UpperCamelCase`` (so only the last
    survived). Method names are restored from the call sites in the companion
    test class (``prepare_config_and_inputs``, ``get_config``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],  # noqa: B006 — read-only, mirrors upstream
        embed_dims=[48, 56, 112, 220],  # noqa: B006
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random pixel data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """A small SwiftFormer config driven by this tester's hyper-params."""
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1E-5 , )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the bare model; check feature-map shape."""
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # Final stage: embed_dims[-1] channels over a 7x7 grid for 224px input.
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: check logits shape with and without labels."""
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        # Same check on a fresh model with fresh unlabeled inputs.
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ =(
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(
self , config_class=_A , has_text_modality=_A , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _UpperCamelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> str:
pass
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _UpperCamelCase ( self ) -> Tuple:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = SwiftFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def _UpperCamelCase ( self ) -> Optional[Any]:
    # Intentionally skipped: the model produces no attention maps.
    pass
def _UpperCamelCase ( self ) -> List[str]:
    # Hidden-state outputs: expect 8 stages whose spatial size halves after
    # every 2 blocks, both when requested per-call and via the config.
    def check_hidden_states_output(_A , _A , _A ):
        # NOTE(review): the three parameter names are obfuscated to the same
        # identifier (a SyntaxError as written); read-sites imply
        # (inputs_dict, config, model_class).
        SCREAMING_SNAKE_CASE_ = model_class(_A )
        model.to(_A )
        model.eval()
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
        SCREAMING_SNAKE_CASE_ = outputs.hidden_states
        SCREAMING_SNAKE_CASE_ = 8
        self.assertEqual(len(_A ) , _A ) # TODO
        # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
        # with the width and height being successively divided by 2, after every 2 blocks
        for i in range(len(_A ) ):
            self.assertEqual(
                hidden_states[i].shape , torch.Size(
                    [
                        self.model_tester.batch_size,
                        self.model_tester.embed_dims[i // 2],
                        (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        (self.model_tester.image_size // 4) // 2 ** (i // 2),
                    ] ) , )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        SCREAMING_SNAKE_CASE_ = True
        check_hidden_states_output(_A , _A , _A )
    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    SCREAMING_SNAKE_CASE_ = True
    check_hidden_states_output(_A , _A , _A )
def _UpperCamelCase ( self ) -> Any:
    # Zero all init ranges/scales in the config and verify every trainable
    # parameter comes out as exactly 0.0 or 1.0.
    def _config_zero_init(_A ):
        # Recursively shrink every *_range / *_std / initializer_factor /
        # layer_scale entry of a (possibly nested) config to ~0.
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(_A )
        for key in configs_no_init.__dict__.keys():
            if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                setattr(_A , _A , 1E-10 )
            if isinstance(getattr(_A , _A , _A ) , _A ):
                SCREAMING_SNAKE_CASE_ = _config_zero_init(getattr(_A , _A ) )
                setattr(_A , _A , _A )
        return configs_no_init
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
    SCREAMING_SNAKE_CASE_ = _config_zero_init(_A )
    for model_class in self.all_model_classes:
        SCREAMING_SNAKE_CASE_ = model_class(config=_A )
        for name, param in model.named_parameters():
            if param.requires_grad:
                # The *1e9/1e9 round-trip rounds tiny init noise to 0.0/1.0.
                self.assertIn(
                    ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self ) -> Optional[int]:
    # Temporarily skipped upstream; see the skip reason above.
    pass
def A__ ( ):
    """Load the COCO cats test-fixture image used by the integration test.

    Fix: the obfuscated original assigned the opened image to a throwaway
    local and then returned the undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: run pretrained SwiftFormer-XS on a fixture image
    and compare the first logits against reference values."""
    @cached_property
    def _UpperCamelCase ( self ) -> Dict:
        # Image processor paired with the checkpoint; None without vision deps.
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
    @slow
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # NOTE(review): assignment targets are obfuscated; read-sites show the
        # intended locals (model, image_processor, inputs, outputs, ...).
        SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = self.default_image_processor
        SCREAMING_SNAKE_CASE_ = prepare_img()
        SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(**_A )
        # verify the logits: 1000 ImageNet classes for a single image
        SCREAMING_SNAKE_CASE_ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Placeholder object that raises a helpful error at use time when the
    torch + scipy backends are not installed."""
    UpperCAmelCase_ =["torch", "scipy"]
    def __init__( self , *_A , **_A ) -> Tuple:
        # Raise ImportError-style guidance on direct construction.
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
        # Guards a classmethod construction path (presumably from_config).
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
        # Guards a classmethod construction path (presumably from_pretrained).
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def A__ ( __lowerCamelCase ):
    """Extract start time, end time and duration (in whole minutes) from one
    GitHub Actions job payload.

    Fix: the obfuscated original assigned every value to the same throwaway
    local and then parsed the whole job dict instead of its timestamps.
    """
    job = __lowerCamelCase
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def A__ ( workflow_run_id , token=None ):
    """Fetch all jobs of a GitHub Actions workflow run and map job name ->
    timing info (see the extractor above). Returns {} on any error.

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError)
    and the body read undefined locals; names restored from the read-sites.
    """
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        # The first request already returned up to 100 jobs; page the rest.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
if __name__ == "__main__":
    # CLI entry point: print per-job durations for a workflow run, longest first.
    # NOTE(review): assignment targets are obfuscated; read-sites show the
    # intended names (parser, args, job_time).
    __UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    __UpperCAmelCase = parser.parse_args()
    __UpperCAmelCase = get_job_time(args.workflow_run_id)
    __UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F"""{k}: {v['duration']}""")
| 299
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
    """Helper that builds small RegNet configs and dummy inputs for the
    Flax model tests."""
    def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
        # NOTE(review): parameter names are obfuscated to _A; the read-sites
        # imply (parent, batch_size, image_size, num_channels, embeddings_size,
        # hidden_sizes, depths, is_training, use_labels, hidden_act,
        # num_labels, scope).
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = embeddings_size
        SCREAMING_SNAKE_CASE_ = hidden_sizes
        SCREAMING_SNAKE_CASE_ = depths
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = len(_A )
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Random pixel values plus a matching config.
        SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, pixel_values
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Build a RegNetConfig from the tester's attributes.
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def _UpperCamelCase ( self , _A , _A ) -> int:
        # Base model: output is (batch, last_hidden_size, H/32, W/32).
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _UpperCamelCase ( self , _A , _A ) -> Any:
        # Classification head: logits are (batch, num_labels).
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCamelCase ( self ) -> Any:
        # Split config/inputs into the (config, inputs_dict) pair the common
        # test mixin expects.
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> None:
SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ) -> str:
return
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _UpperCamelCase ( self ) -> Dict:
pass
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _UpperCamelCase ( self ) -> Any:
def check_hidden_states_output(_A , _A , _A ):
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
SCREAMING_SNAKE_CASE_ = model_class(_A )
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
    """Load the COCO cats test-fixture image used by the integration test.

    Fix: the obfuscated original assigned the opened image to a throwaway
    local and then returned the undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**_A )
# verify the logits
SCREAMING_SNAKE_CASE_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _A )
SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
| 1
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# mark all tests as integration
# NOTE(review): the constant names are obfuscated; the decorator read-sites
# below imply, in order: pytest_mark, REQUIRE_FAIRSEQ, _has_fairseq,
# UNSUPPORTED_ON_WINDOWS, _on_windows, REQUIRE_TRANSFORMERS,
# _has_transformers.
__UpperCAmelCase = pytest.mark.integration
__UpperCAmelCase = {"comet"}
__UpperCAmelCase = importlib.util.find_spec("fairseq") is not None
__UpperCAmelCase = {"code_eval"}
__UpperCAmelCase = os.name == "nt"
__UpperCAmelCase = {"bertscore", "frugalscore", "perplexity"}
__UpperCAmelCase = importlib.util.find_spec("transformers") is not None
def A__ ( __lowerCamelCase ):
    """Decorator: skip the wrapped test when the metric requires fairseq and
    fairseq is not installed.

    Fix: the obfuscated original never bound ``test_case``/``metric_name``,
    which the body reads.
    """
    test_case = __lowerCamelCase
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def A__ ( __lowerCamelCase ):
    """Decorator: skip the wrapped test when the metric requires transformers
    and transformers is not installed.

    Fix: the obfuscated original never bound ``test_case``/``metric_name``,
    which the body reads.
    """
    test_case = __lowerCamelCase
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def A__ ( __lowerCamelCase ):
    """Decorator: skip the wrapped test on Windows when the metric is in the
    unsupported-on-Windows set.

    Fix: the obfuscated original never bound ``test_case``/``metric_name``,
    which the body reads.
    """
    test_case = __lowerCamelCase
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self , metric_name )
    return wrapper
def A__ ( ):
    """Return parameterized.named_parameters entries for every local metric
    directory under ./metrics, excluding the unfinished ``gleu`` metric.

    Fix: the obfuscated original assigned the directory list to a throwaway
    local and then iterated the undefined name ``metrics``.
    """
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@local
class UpperCamelCase__ ( parameterized.TestCase ):
    """Runs every local metric's doctests, with expensive model calls patched
    out via registered per-metric patchers."""
    # Registry of per-metric patchers / last results (names obfuscated;
    # read-sites imply INTENSIVE_CALLS_PATCHER below).
    UpperCAmelCase_ ={}
    UpperCAmelCase_ =None
    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def _UpperCamelCase ( self , _A ) -> int:
        # Import the metric module, sanity-check _compute's signature, then
        # run its doctests with intensive calls patched out.
        SCREAMING_SNAKE_CASE_ = '''[...]'''
        SCREAMING_SNAKE_CASE_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , _A ) ).module_path )
        SCREAMING_SNAKE_CASE_ = datasets.load.import_main_class(metric_module.__name__ , dataset=_A )
        # check parameters
        SCREAMING_SNAKE_CASE_ = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(_A , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    SCREAMING_SNAKE_CASE_ = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def _UpperCamelCase ( self , _A ) -> List[str]:
        # Slow variant: run the doctests without patching intensive calls.
        SCREAMING_SNAKE_CASE_ = '''[...]'''
        SCREAMING_SNAKE_CASE_ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , _A ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            SCREAMING_SNAKE_CASE_ = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def _UpperCamelCase ( self , _A , _A ) -> List[str]:
        # Apply the registered patcher for this metric, if any.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](_A ):
                yield
        else:
            yield
    @contextmanager
    def _UpperCamelCase ( self ) -> Dict:
        # Redirect datasets.load_metric at the local ./metrics directory.
        def load_local_metric(_A , *_A , **_A ):
            return load_metric(os.path.join('''metrics''' , _A ) , *_A , **_A )
        with patch('''datasets.load_metric''' ) as mock_load_metric:
            SCREAMING_SNAKE_CASE_ = load_local_metric
            yield
    @classmethod
    def _UpperCamelCase ( cls , _A ) -> Tuple:
        # Decorator factory: register a context-manager patcher for a metric.
        def wrapper(_A ):
            SCREAMING_SNAKE_CASE_ = contextmanager(_A )
            SCREAMING_SNAKE_CASE_ = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def A__ ( __lowerCamelCase ):
    # Patcher: stub bleurt's TF predictor so no checkpoint is downloaded/run.
    # NOTE(review): ``tensorflow.compat.va`` looks like mangled
    # ``tensorflow.compat.v1`` -- confirm against upstream.
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('''sv''', '''''', '''''' ) # handle pytest cli flags
    class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
        """Mock Predictor that returns fixed scores for a 2-example batch."""
        def _UpperCamelCase ( self , _A ) -> Tuple:
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        SCREAMING_SNAKE_CASE_ = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def A__ ( __lowerCamelCase ):
    """Patcher: stub out bert_score's model download and forward pass."""
    import torch
    # Fix: the obfuscated stub repeated one parameter name four times, which
    # is a SyntaxError; one fixed (1,1,1) row per reference sentence.
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        # NOTE(review): the original assigned to an obfuscated throwaway local;
        # wiring the stub as the mock's side_effect matches the read-sites.
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def A__ ( __lowerCamelCase ):
    # Patcher: stub comet's download/load so no checkpoint is fetched.
    def load_from_checkpoint(__lowerCamelCase ):
        class UpperCamelCase__ :
            """Stub comet model: fixed per-segment scores plus their mean."""
            def _UpperCamelCase ( self , _A , *_A , **_A ) -> int:
                assert len(_A ) == 2
                SCREAMING_SNAKE_CASE_ = [0.19, 0.92]
                return scores, sum(_A ) / len(_A )
        return Model()
    # mock download_model which is supposed to download a checkpoint
    # mock load_from_checkpoint which is supposed to load that checkpoint
    with patch('''comet.download_model''' ) as mock_download_model:
        SCREAMING_SNAKE_CASE_ = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            SCREAMING_SNAKE_CASE_ = load_from_checkpoint
            yield
def A__ ( ):
    """seqeval must raise for an unknown tagging scheme.

    Fix: the obfuscated original read the undefined names ``metric`` and
    ``wrong_scheme`` and passed an undefined exception type to pytest.raises.
    NOTE(review): ValueError chosen to match the metric's scheme validation --
    confirm against the seqeval metric implementation.
    """
    metric = load_metric(os.path.join('''metrics''', '''seqeval''' ) )
    wrong_scheme = '''ERROR'''
    expected_error_message = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError, match=re.escape(expected_error_message ) ):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme )
| 299
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A__ ( __lowerCamelCase ):
    """Return True iff ``__lowerCamelCase`` is a perfect square.

    Fix: the obfuscated original read the undefined names ``number`` and
    ``sq``. ``math.isqrt`` is used (locally imported, since the module header
    only brings in gcd/sqrt) to avoid float-precision errors on large ints.
    """
    from math import isqrt
    root = isqrt(__lowerCamelCase )
    return __lowerCamelCase == root * root
def A__ ( x_num , x_den , y_num , y_den , z_num , z_den ):
    """Return the reduced (numerator, denominator) of the sum of the three
    fractions x_num/x_den + y_num/y_den + z_num/z_den.

    Fix: the obfuscated signature repeated one parameter name six times (a
    SyntaxError) while the body reads x_num/x_den/...; names restored from
    the read-sites. Callers pass all six values positionally.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    return top // hcf, bottom // hcf
def A__ ( __lowerCamelCase = 35 ):
    """Enumerate triples of reduced proper fractions x, y, z (denominators up
    to ``__lowerCamelCase``) satisfying x^n + y^n = z^n for n in
    {1, 2, -1, -2}, sum the distinct x+y+z values, and return the final
    total's numerator + denominator (Project Euler-style answer).

    Fixes over the obfuscated original: the accumulators (``unique_s``,
    ``total``) and every ``hcf``/``z_num``/``z_den`` intermediate were never
    bound, and the helpers ``add_three``/``is_sq`` were undefined names; both
    helpers are inlined here so the function is self-contained.
    """
    order = __lowerCamelCase

    def _is_sq(number ):
        # Perfect-square test; inputs stay small enough for float sqrt.
        root = int(sqrt(number ) )
        return number == root * root

    def _add_three(x_num , x_den , y_num , y_den , z_num , z_den ):
        # Reduced (numerator, denominator) of the sum of three fractions.
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top , bottom )
        return top // hcf, bottom // hcf

    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=2: z^2 = x^2 + y^2, accepted only if z is rational
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=-1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
                    # n=-2: 1/z^2 = 1/x^2 + 1/y^2, accepted only if z is rational
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num , x_den , y_num , y_den , z_num , z_den ) )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
    # Print the answer when run as a script.
    # NOTE(review): `solution` is the upstream function name; in this
    # obfuscated file the function above is named A__, so this call is
    # unresolved as written.
    print(F"""{solution() = }""")
| 299
| 1
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module-level logger for these model tests (target name obfuscated).
__UpperCAmelCase = logging.get_logger(__name__)
# Make CUDA/cuDNN deterministic so the reference slices below are stable.
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """UNet2DModel tests with a small 3-channel DDPM-style configuration."""
    # Model class under test and the name of its main output.
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"
    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Dummy input: random 4x3x32x32 sample plus a single timestep.
        # NOTE(review): assignment targets are obfuscated; read-sites imply
        # (batch_size, num_channels, sizes, noise, time_step).
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}
    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Minimal UNet2DModel init kwargs plus matching dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """UNet2DModel tests with a 4-channel LDM-style configuration, plus slow
    checkpoint-loading and numerical-reference checks."""
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"
    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Dummy input: random 4x4x32x32 latent plus a single timestep.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}
    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected input shape (C, H, W).
        return (4, 32, 32)
    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (4, 32, 32)
    def _UpperCamelCase ( self ) -> Tuple:
        # Minimal 4-channel UNet2DModel init kwargs plus dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
    def _UpperCamelCase ( self ) -> Tuple:
        # from_pretrained must report no missing keys and produce an output.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # Same smoke test on GPU.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # Accelerate-based and regular loading must produce matching outputs.
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
        assert torch_all_close(_A , _A , rtol=1E-3 )
    def _UpperCamelCase ( self ) -> List[str]:
        # Fixed-seed forward pass must reproduce the reference output slice.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """UNet2DModel tests for the NCSN++ (score-based, Fourier time embedding)
    configuration, plus slow reference-output checks."""
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"
    @property
    def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
        # Dummy input: random 4x3xHxW sample plus per-sample int timesteps.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}
    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def _UpperCamelCase ( self ) -> Optional[int]:
        # NCSN++-style init kwargs (Fourier time embedding, skip blocks).
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        # from_pretrained must report no missing keys and produce an output
        # for a 256x256 input.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
        SCREAMING_SNAKE_CASE_ = noise
        SCREAMING_SNAKE_CASE_ = model(**_A )
        assert image is not None, "Make sure output is not None"
    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        # All-ones 256x256 input must reproduce the reference output slice.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (256, 256)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
    def _UpperCamelCase ( self ) -> int:
        # Same check against the small dummy NCSN++ checkpoint at 32x32.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
    def _UpperCamelCase ( self ) -> Dict:
        # not required for this model
        pass
| 299
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module-level logger (target name obfuscated; upstream calls it `logger`).
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Holds the (optionally learnable) classifier-free-guidance text
    embeddings used by the VQ-Diffusion pipeline."""
    @register_to_config
    def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
        # NOTE(review): parameter names are obfuscated; the read-sites imply
        # (learnable, hidden_size, length).
        super().__init__()
        SCREAMING_SNAKE_CASE_ = learnable
        if self.learnable:
            # Learnable mode needs both dimensions to allocate the table.
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
        else:
            SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """VQ-Diffusion text-to-image pipeline: encodes a prompt with CLIP,
    iteratively denoises discrete VQ-VAE latent indices with a transformer
    under a VQDiffusionScheduler, then decodes them to images with the VQ-VAE.

    NOTE(review): this block is machine-obfuscated -- the multi-parameter
    signatures below reuse the duplicate name `_A` (a SyntaxError) and most
    locals share a single placeholder assignment target while later lines
    read the original variable names. The comments describe the intent
    visible in the code; the code cannot run as written.
    """

    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42

    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
        """Register the sub-models (vqvae, transformer, text encoder, tokenizer,
        scheduler, learned classifier-free sampling embeddings) on the pipeline."""
        super().__init__()
        self.register_modules(
            vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
        """Tokenize and CLIP-encode the prompt(s); with classifier-free guidance
        also build the unconditional embeddings and concatenate them in front."""
        SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        # Warn about (and drop) any tokens beyond CLIP's maximum sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the trained "null" embeddings, broadcast to the batch size.
                SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
            else:
                # Otherwise encode an empty prompt as the unconditional branch.
                SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
                SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images for the prompt(s): start from fully-masked latent
        indices and iteratively denoise with the transformer, then decode via
        the VQ-VAE and optionally convert to PIL images."""
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = len(_A )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
        SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
        # Classifier-free guidance is active for guidance scales above 1.
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_A )}.''' )
        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The highest embedding index is the dedicated "masked" token.
            SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            SCREAMING_SNAKE_CASE_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_A , device=self.device )
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
        SCREAMING_SNAKE_CASE_ = latents
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
                # Guidance: move the conditional logits away from the unconditional ones.
                SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
            # Keep only the most probable classes up to the truncation-rate mass.
            SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A )
        # Decode the final latent indices back to pixel space with the VQ-VAE.
        SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )

    def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
        """Zero out (set to log(0)) all classes outside the smallest set whose
        cumulative probability reaches the truncation rate, per latent position."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
        SCREAMING_SNAKE_CASE_ = torch.exp(_A )
        SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
        SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
        # Scatter the keep-mask back to the original (unsorted) class order.
        SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
        SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
        SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
        return rv
| 299
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Scheduler test-suite for DDPMScheduler: exercises config permutations,
    variance spot-checks, full denoising loops (epsilon and v-prediction) and
    custom-timestep validation.

    NOTE(review): machine-obfuscated -- every method shares the name
    `_UpperCamelCase` (later defs shadow earlier ones) and many assignment
    targets were collapsed to one placeholder while later lines still read
    the original names (`config`, `scheduler`, `model`, ...); the code
    cannot run as written.
    """

    UpperCAmelCase_ =(DDPMScheduler,)

    def _UpperCamelCase ( self , **_A ) -> Optional[Any]:
        # Default DDPM scheduler kwargs; callers override via keyword arguments.
        SCREAMING_SNAKE_CASE_ = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        # NOTE(review): `config` is undefined here -- the dict above was
        # renamed by the obfuscation; intent is config.update / return config.
        config.update(**_A )
        return config

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Config permutation: number of training timesteps.
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_A )

    def _UpperCamelCase ( self ) -> int:
        # Config permutation: paired beta_start / beta_end values.
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_A )

    def _UpperCamelCase ( self ) -> int:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_A )

    def _UpperCamelCase ( self ) -> int:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_A )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Thresholding combined with every threshold / prediction-type pair.
        self.check_over_configs(thresholding=_A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_A , prediction_type=_A , sample_max_value=_A , )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )

    def _UpperCamelCase ( self ) -> Tuple:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=_A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Spot-check _get_variance at the first, a middle and the last timestep.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def _UpperCamelCase ( self ) -> List[str]:
        # Full reverse-diffusion loop (default epsilon prediction); compares the
        # final sample's sum/mean against recorded reference values.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = len(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_model()
        SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
        SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            SCREAMING_SNAKE_CASE_ = model(_A , _A )
            # 2. predict previous mean of sample x_t-1
            SCREAMING_SNAKE_CASE_ = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
            # if t > 0:
            # noise = self.dummy_sample_deter
            # variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            SCREAMING_SNAKE_CASE_ = pred_prev_sample
        SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_A ) )
        SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def _UpperCamelCase ( self ) -> List[Any]:
        # Same full loop but with prediction_type="v_prediction".
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = len(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_model()
        SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
        SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            SCREAMING_SNAKE_CASE_ = model(_A , _A )
            # 2. predict previous mean of sample x_t-1
            SCREAMING_SNAKE_CASE_ = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
            # if t > 0:
            # noise = self.dummy_sample_deter
            # variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            SCREAMING_SNAKE_CASE_ = pred_prev_sample
        SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_A ) )
        SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def _UpperCamelCase ( self ) -> str:
        # Custom (descending) timesteps: previous_timestep must walk the list.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=_A )
        SCREAMING_SNAKE_CASE_ = scheduler.timesteps
        for i, timestep in enumerate(_A ):
            if i == len(_A ) - 1:
                SCREAMING_SNAKE_CASE_ = -1
            else:
                SCREAMING_SNAKE_CASE_ = timesteps[i + 1]
            SCREAMING_SNAKE_CASE_ = scheduler.previous_timestep(_A )
            SCREAMING_SNAKE_CASE_ = prev_t.item()
            self.assertEqual(_A , _A )

    def _UpperCamelCase ( self ) -> int:
        # Non-descending custom timesteps must raise.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = [100, 87, 50, 51, 0]
        with self.assertRaises(_A , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=_A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Passing both num_inference_steps and custom timesteps must raise.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = [100, 87, 50, 1, 0]
        SCREAMING_SNAKE_CASE_ = len(_A )
        with self.assertRaises(_A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )

    def _UpperCamelCase ( self ) -> int:
        # Custom timesteps must start below num_train_timesteps.
        SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE_ = scheduler_class(**_A )
        SCREAMING_SNAKE_CASE_ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _A , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=_A )
| 299
|
def A__ ( input_num ):
    """Return the sum of all proper divisors of `input_num` (every divisor
    strictly smaller than the number itself, including 1).

    Raises:
        ValueError: If `input_num` is not an int or is not positive.
    """
    # Fixes vs. the obfuscated original: the parameter had been renamed while
    # the body still read `input_num`, and the type check was the nonsensical
    # `isinstance(x, x)` (a TypeError at runtime).
    if not isinstance(input_num, int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    # Proper divisors never exceed n // 2 (besides n itself, which is excluded).
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 299
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for PEGASUS sequence-to-sequence models.

    Stores the hyper-parameters of the encoder/decoder stacks; the defaults
    correspond to the `google/pegasus-large` checkpoint.
    """

    UpperCAmelCase_ ="pegasus"
    UpperCAmelCase_ =["past_key_values"]
    UpperCAmelCase_ ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        # NOTE(review): the obfuscated original declared every parameter as
        # `_A` (duplicate names are a SyntaxError); names and order restored
        # from the attribute assignments in the body and the original
        # default values, which were preserved by the obfuscation.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def _UpperCamelCase ( self ) -> int:
        # Alias used via the attribute map: attention heads == encoder heads.
        return self.encoder_attention_heads

    @property
    def _UpperCamelCase ( self ) -> int:
        # Alias used via the attribute map: hidden size == d_model.
        return self.d_model
| 299
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test-suite for ByT5's byte-level tokenizer: UTF-8 round-trips,
    EOS handling, batching, target tokenization, special-token setters and
    save/load behaviour.

    NOTE(review): machine-obfuscated -- every test method is named
    `_UpperCamelCase` (later defs shadow earlier ones), one signature reuses
    the duplicate parameter name `_A` (a SyntaxError), and many assignment
    targets were collapsed onto one placeholder while later lines read the
    original variable names; the code cannot run as written.
    """

    UpperCAmelCase_ =ByTaTokenizer
    UpperCAmelCase_ =False

    def _UpperCamelCase ( self ) -> Tuple:
        # Save a fresh ByT5 tokenizer into the temp dir used by the common tests.
        super().setUp()
        SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _UpperCamelCase ( self ) -> List[str]:
        # Lazily-loaded reference tokenizer from the hub checkpoint.
        return ByTaTokenizer.from_pretrained('''google/byt5-small''' )

    def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )

    def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        SCREAMING_SNAKE_CASE_ = []
        for i in range(len(_A ) ):
            try:
                SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
        if max_length is not None and len(_A ) > max_length:
            SCREAMING_SNAKE_CASE_ = toks[:max_length]
        if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
            while len(_A ) < min_length:
                SCREAMING_SNAKE_CASE_ = toks + toks
        # toks_str = [t[1] for t in toks]
        SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
        # Ensure consistency
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
        if " " not in output_txt and len(_A ) > 1:
            SCREAMING_SNAKE_CASE_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
            )
        if with_prefix_space:
            SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
        return output_txt, output_ids

    def _UpperCamelCase ( self ) -> str:
        # Appending </s> explicitly must match the automatically-added EOS.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )

    def _UpperCamelCase ( self ) -> Any:
        # Byte-level round-trip of multibyte (UTF-8) characters.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
        SCREAMING_SNAKE_CASE_ = tokenizer(_A )
        SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''Unicode €.</s>''' )
        SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
        SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''e è é ê ë</s>''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )

    def _UpperCamelCase ( self ) -> List[str]:
        # Batch padding produces rectangular input_ids / attention_mask tensors.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        self.assertIsInstance(_A , _A )
        if FRAMEWORK != "jax":
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
        else:
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_A , _A )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , _A )
        self.assertIn('''attention_mask''' , _A )
        self.assertNotIn('''decoder_input_ids''' , _A )
        self.assertNotIn('''decoder_attention_mask''' , _A )

    def _UpperCamelCase ( self ) -> Tuple:
        # Target-side tokenization with max_length padding/truncation.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer(
            text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    def _UpperCamelCase ( self ) -> List[Any]:
        # Joint source/target call fills both input_ids and labels.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
        SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
        self.assertEqual(_A , batch['''input_ids'''][0] )
        self.assertEqual(_A , batch['''labels'''][0] )

    def _UpperCamelCase ( self ) -> Dict:
        # safety check on max_len default value so we are sure the test works
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                shutil.rmtree(_A )
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_A )

    def _UpperCamelCase ( self ) -> int:
        # Round-trip additional_special_tokens through the saved config files.
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , additional_special_tokens=_A , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )

    def _UpperCamelCase ( self ) -> str:
        # Decoding the lone byte value 255 must yield an empty string.
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
                self.assertTrue(tokenizer.decode([255] ) == '''''' )

    def _UpperCamelCase ( self ) -> int:
        # Common-test override: not applicable to a byte-level tokenizer.
        pass

    def _UpperCamelCase ( self ) -> Any:
        pass

    def _UpperCamelCase ( self ) -> Any:
        pass

    def _UpperCamelCase ( self ) -> Optional[int]:
        pass

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
                self.assertIsInstance(_A , _A )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Special-token attribute setters must stay consistent with their *_id twins.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                SCREAMING_SNAKE_CASE_ = 0
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
                    _A , skip_special_tokens=_A )
                for attr in attributes_list:
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                setattr(_A , '''additional_special_tokens_ids''' , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
                setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 299
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( OwlViTImageProcessor ):
    """Deprecated alias kept for backward compatibility; use
    `OwlViTImageProcessor` instead."""

    def __init__( self , *args , **kwargs ) -> None:
        # Emit a FutureWarning on construction. The obfuscated original passed
        # the positional-argument tuple as the warning category, which raises
        # a TypeError at runtime; the category is restored here.
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 299
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A__ ( img ):
    """Convert `img` to its color negative in place (each channel value v
    becomes 255 - v) and return it. Expects an H x W x C integer array."""
    # Fixes vs. the obfuscated original: the parameter had been renamed while
    # the body read `img`, the loops ranged over the image object instead of
    # its height/width, and the negated pixel was assigned to a throwaway
    # local instead of back into `img[i][j]` (making the function a no-op).
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # NOTE(review): `convert_to_negative` and `img` are undefined here -- the
    # obfuscation renamed the function to `A__` and the result variable to
    # `__UpperCAmelCase`; running this script as-is raises NameError.
    # read original image
    __UpperCAmelCase = imread("image_data/lena.jpg", 1)
    # convert to its negative
    __UpperCAmelCase = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 299
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds a DecisionTransformerConfig plus matching random inputs for the model tests.

    Fixes over the obfuscated original: the constructor declared the same
    parameter name for every argument (a SyntaxError) and bound values to a
    throwaway local instead of ``self``; method names are restored from their
    internal call sites; ``prepare_config_and_inputs_for_common`` now actually
    unpacks the seven returned values instead of rebinding one throwaway name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        # NOTE(review): parameter names reconstructed from the attribute reads
        # below; the defaults are preserved from the original signature.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, states, actions, rewards, returns_to_go, timesteps, attention_mask)."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        """Build the model configuration from this tester's dimensions."""
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        """Instantiate the model, run a forward pass and check all output shapes."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        # seq length * 3 as there are 3 modalities: states, returns and actions
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for DecisionTransformerModel.

    Fixes over the obfuscated original: the base classes were replaced by an
    undefined placeholder (restored from this file's imports), every test
    method shared one name so later definitions shadowed earlier ones, and
    method bodies referenced the undefined placeholder ``_A``.
    """

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    # NOTE(review): the obfuscation erased these flag names; they are
    # reconstructed from the upstream test conventions — confirm against the
    # original file before relying on them.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    """Slow integration test: two steps of autoregressive action prediction.

    Fixes over the obfuscated original: the model/device placeholder ``_A`` is
    restored to ``torch_device``, and the tuple unpacks (which rebound a single
    throwaway name and then read undefined variables like ``action_pred`` and
    ``reward``) are unpacked into their real targets.
    """

    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            # Append an all-zero placeholder action/reward for the step to predict.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_pred, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            # Stand-in for env.step(action): a random next state and a fixed reward.
            state, reward, done, _ = (
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 299
|
import math


def check_partition_perfect(positive_integer):
    """Return True if *positive_integer* corresponds to a "perfect" partition.

    A partition value k is perfect when sqrt(4k + 1)/2 + 1/2 is an exact power
    of two, i.e. its base-2 logarithm is an integer (Project Euler 207).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion=1 / 12345):
    """Return the smallest partition value for which the proportion of perfect
    partitions first drops strictly below *max_proportion* (Project Euler 207).

    Bug fix: the obfuscated original defined both functions under the same name
    and then called the undefined names ``check_partition_perfect`` and
    ``solution``; the real names are restored here.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


# Backward-compatible alias for the obfuscated public name.
A__ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
| 299
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Declarative import map consumed by _LazyModule: submodule -> public names.
# Bug fix: the obfuscated source bound the dict, the modeling-name list and the
# lazy-module proxy all to a throwaway variable, so `_import_structure` was
# undefined and the lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

# The modeling code requires torch; only advertise it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports, visible to static type checkers only.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodule imports
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
def infix_2_postfix(infix):
    """Convert an infix expression string to postfix notation (shunting-yard),
    printing a symbol/stack/postfix trace table along the way.

    Bug fix vs. the obfuscated original: the cross-function call targets were
    undefined (both functions had been renamed to the same name), and the
    operator-popping loop raised KeyError whenever an operator met ``"("`` on
    the stack — the loop now stops at an open parenthesis.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # While priority of x is not > priority of element in the stack,
                # pop.  Stop at "(" — it has no priority entry and only a ")"
                # may remove it (fixes a KeyError on nested parentheses).
                while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert infix to prefix: reverse the expression, swap parentheses,
    convert to postfix, then reverse the result.

    Bug fix: the original assigned the swapped parenthesis to a throwaway
    local instead of writing it back into the list.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    # call infix_2_postfix on the reversed Infix, return reverse of its Postfix
    return (infix_2_postfix("".join(reversed_infix)))[::-1]


# Backward-compatible alias for the obfuscated public name.
A__ = infix_2_prefix

if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 1
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
__UpperCAmelCase = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__UpperCAmelCase = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = cached_file(_A , _A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) )
with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) )
self.assertTrue(os.path.isfile(_A ) )
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE_ = cached_file(_A , _A )
self.assertEqual(_A , _A )
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE_ = cached_file(_A , _A , revision='''9b8c223''' )
self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) )
def _UpperCamelCase ( self ) -> List[str]:
with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE_ = cached_file('''tiny-random-bert''' , _A )
with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ):
SCREAMING_SNAKE_CASE_ = cached_file(_A , _A , revision='''aaaa''' )
with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE_ = cached_file(_A , '''conf''' )
def _UpperCamelCase ( self ) -> Optional[int]:
with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ):
SCREAMING_SNAKE_CASE_ = cached_file(_A , '''conf''' )
with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
self.assertTrue(os.path.isfile(os.path.join(_A , '''.no_exist''' , _A , '''conf''' ) ) )
SCREAMING_SNAKE_CASE_ = cached_file(_A , '''conf''' , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
SCREAMING_SNAKE_CASE_ = cached_file(_A , '''conf''' , local_files_only=_A , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
SCREAMING_SNAKE_CASE_ = mock.Mock()
SCREAMING_SNAKE_CASE_ = 500
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = HTTPError
SCREAMING_SNAKE_CASE_ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_A ) as mock_head:
SCREAMING_SNAKE_CASE_ = cached_file(_A , '''conf''' , _raise_exceptions_for_connection_errors=_A )
self.assertIsNone(_A )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self ) -> int:
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) )
def _UpperCamelCase ( self ) -> Any:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _A , revision='''ahaha''' )
SCREAMING_SNAKE_CASE_ = get_file_from_repo('''bert-base-cased''' , _A )
# The name is the cached name which is not very easy to test, so instead we load the content.
SCREAMING_SNAKE_CASE_ = json.loads(open(_A , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _UpperCamelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = Path(_A ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_A , '''a.txt''' ) , str(_A ) )
self.assertIsNone(get_file_from_repo(_A , '''b.txt''' ) )
| 299
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger (obfuscated name; originally `logger`, which the class
# body below still references).
__UpperCAmelCase = logging.get_logger(__name__)


class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP audio feature extractor: converts raw waveforms into (optionally
    fused) log-mel spectrogram features.

    NOTE(review): this block is obfuscation-damaged and cannot run as-is:
    * several ``def`` signatures declare the parameter name ``_A`` more than
      once, which is a SyntaxError in Python;
    * ``__init__`` binds every value to the throwaway local
      ``SCREAMING_SNAKE_CASE_`` instead of ``self.<attr>``, yet later methods
      read ``self.truncation``, ``self.mel_filters``, ``self.nb_max_samples``,
      etc.;
    * the helper methods are all named ``_UpperCamelCase`` (each definition
      shadows the previous one) while their call sites still use the original
      names ``_np_extract_fbank_features``, ``_random_mel_fusion`` and
      ``_get_input_mel``.
    Restore the original identifiers from the upstream sources before use.
    """

    # Keys produced by __call__ (originally `model_input_names`).
    UpperCAmelCase_ =["input_features", "is_longer"]

    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        # Forward the generic feature-extractor settings to the base class.
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        # NOTE(review): each line below should read `self.<name> = <name>`.
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a one-sided spectrum: fft_window_size/2 + 1.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        # Maximum number of raw samples accepted before truncation.
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks: HTK-style and Slaney-style.
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )

    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        # Serialize this instance, dropping the (large, derivable) filter banks.
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        # Compute a log-mel spectrogram (dB) and return it time-major (frames x mels).
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        # Fuse a shrunken copy of the full mel with three random chunks
        # (front / middle / back thirds) into a 4-channel stack.
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        # Truncate (rand_trunc/fusion) or pad (repeat/repeatpad) one waveform to
        # max_length, then extract its mel features; returns (input_mel, longer).
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        # Featurize one waveform or a batch of waveforms into a BatchFeature
        # with "input_features" and "is_longer" entries.
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
| 1
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used to post CI reports; authenticated via CI_SLACK_BOT_TOKEN.
# NOTE(review): the class below references this object as `client` — the
# obfuscation renamed it; restore the original name before running.
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__(test_results):
    """Parse a pytest result summary line into (failed, success, time_spent).

    Bug fix: the obfuscated original bound the counters and the time string to
    a throwaway local and then read the undefined names ``failed``,
    ``success`` and ``time_spent``; real local names are restored.

    :param test_results: e.g. ``"2 failed, 3 passed in 1:02:03"``.
    :return: tuple (failed count, passed count, time-spent token).
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def A__(failures_short_lines):
    """Map each doctest file named in a pytest "failures short" report to the
    first non-code line of its error message.

    Bug fix: the obfuscated original bound ``failures``, ``file`` and
    ``in_error`` to a throwaway local and then read the undefined names.

    :param failures_short_lines: raw text of the failures-short report.
    :return: dict of ``{file_identifier: first error line}``.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Header line of a doctest failure; the third token names the file.
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First non-numbered line after the header is the error summary.
            failures[file] = line
            in_error = False
    return failures
class UpperCamelCase__ :
    """Builds and posts the Slack report for the daily doc-test CI run.

    NOTE(review): obfuscation damage — ``__init__`` binds every value to a
    throwaway local instead of ``self.<attr>`` (the methods read
    ``self.title``, ``self._time_spent``, ``self.n_failures``, etc.), every
    method/property is named ``_UpperCamelCase`` so later definitions shadow
    earlier ones, ``_A`` stands in for lost names, and the time property's
    loop variable shadows the imported ``time`` module.  Restore the original
    identifiers from the upstream sources before this class can run.
    """

    def __init__( self , _A , _A ) -> Dict:
        # Expected attributes: title, _time_spent, n_success, n_failures,
        # n_tests and the raw doc_test_results mapping.
        SCREAMING_SNAKE_CASE_ = title
        SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
        SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
        SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
        SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE_ = doc_test_results

    @property
    def _UpperCamelCase ( self ) -> str:
        # Total wall-clock time across runs, rendered as "XhYmZs".
        SCREAMING_SNAKE_CASE_ = [self._time_spent]
        SCREAMING_SNAKE_CASE_ = 0
        for time in time_spent:
            SCREAMING_SNAKE_CASE_ = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_A ) == 1:
                SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Slack header block with the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # "All green" section block, with a link to the GitHub Action run.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Failure-summary section block (counts + run time + Action link).
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Markdown section listing, per category, the examples that failed.
        # NOTE(review): reads the module-level `doc_test_results` global rather
        # than self.doc_test_results — confirm against upstream.
        SCREAMING_SNAKE_CASE_ = 40
        SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
        SCREAMING_SNAKE_CASE_ = ''''''
        for category, failures in category_failures.items():
            if len(_A ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_A )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> str:
        # Assemble the final list of Slack blocks and serialize it to JSON.
        SCREAMING_SNAKE_CASE_ = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_A )

    @staticmethod
    def _UpperCamelCase ( ) -> Any:
        # Post a generic "tests failed to run" message with an Action link.
        SCREAMING_SNAKE_CASE_ = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_A )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Post the main report message to the daily CI Slack channel.
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
        SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
        # Build the Slack blocks for a per-job reply (header, text, failures).
        SCREAMING_SNAKE_CASE_ = ''''''
        for key, value in failures.items():
            # Truncate very long error messages so Slack accepts the payload.
            SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        SCREAMING_SNAKE_CASE_ = job_name
        SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            SCREAMING_SNAKE_CASE_ = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def _UpperCamelCase ( self ) -> int:
        # Post one threaded reply per failing job under the main message.
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                SCREAMING_SNAKE_CASE_ = job_result['''failures''']
                SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
                # Throttle to stay under Slack's rate limits.
                time.sleep(1 )
def A__():
    """Return a mapping of job name -> GitHub Actions job URL for the current run.

    Paginates through the GitHub "list jobs for a workflow run" endpoint
    (100 jobs per page).  On any error an empty dict is returned after
    logging, so callers degrade gracefully.

    Bug fix: the obfuscated original bound ``run_id``, ``url``, ``result``
    and ``jobs`` to a throwaway local and then read the undefined names.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # First page already fetched; compute how many additional pages remain.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def A__(artifact_path):
    """Read every file in *artifact_path* into a dict keyed by file stem.

    Returns an empty dict when the directory does not exist.  Raises a
    ValueError (chained from the decode error) when a file is not valid UTF-8.

    Bug fix: the obfuscated original bound ``files`` and the file contents to
    a throwaway local, so nothing was ever stored in the result.

    NOTE(review): keying by ``file.split(".")[0]`` (the stem) is reconstructed
    from how callers index the result (e.g. ``artifact["stats"]``) — confirm
    against upstream.
    """
    _artifact = {}
    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file_name in files:
            try:
                with open(os.path.join(artifact_path, file_name), encoding="utf-8") as f:
                    _artifact[file_name.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file_name)}.") from e
    return _artifact
def A__():
    """Scan the current working directory and return {artifact_name: Artifact}
    for every subdirectory found.

    Bug fix: the obfuscated original's inner class never assigned
    ``self.name``/``self.paths`` (it bound a throwaway local), its ``add_path``
    method had been renamed away from the name the call site uses, and the
    accumulator dict was bound to a throwaway local as well.
    """

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            # Record where this artifact's files live.
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    # Entry point of the doc-test Slack reporting script: fetch job links and
    # artifacts, parse the pytest output, then post the summary to Slack.
    # NOTE(review): the names used here (get_job_links, retrieve_artifact,
    # handle_test_results, Message, ...) do not match the obfuscated ``A__``
    # definitions above — confirm against the original source.
    __UpperCAmelCase = get_job_links()
    __UpperCAmelCase = retrieve_available_artifacts()
    # Map file-name patterns to human-readable doc-test categories.
    __UpperCAmelCase = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __UpperCAmelCase = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    __UpperCAmelCase = github_actions_job_links.get("run_doctests")
    __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    __UpperCAmelCase = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        # Overall counts and duration come from pytest's "stats" summary line.
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
        __UpperCAmelCase = failed
        __UpperCAmelCase = success
        __UpperCAmelCase = time_spent[1:-1] + ", "
        __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
        # Attribute each FAILED line of the short summary to its category.
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                __UpperCAmelCase = line.replace("FAILED ", "")
                __UpperCAmelCase = line.split()[0].replace("\n", "")
                if "::" in line:
                    __UpperCAmelCase , __UpperCAmelCase = line.split("::")
                else:
                    __UpperCAmelCase , __UpperCAmelCase = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
                        __UpperCAmelCase = failure
                        break
    __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
|
import math
import random
def A__ ( value, deriv = False ):
    """Logistic sigmoid, or its derivative when ``deriv`` is true.

    Args:
        value: the input; when ``deriv`` is True it must already be a sigmoid
            output ``s``, and ``s * (1 - s)`` is returned.
        deriv: select the derivative instead of the sigmoid itself.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
# Constant input fed into the single-neuron network on each propagation step;
# it also scales the weight update in the training loop below.
__UpperCAmelCase = 0.02
def A__ ( expected, number_propagations ):
    """Train a one-weight neuron toward ``expected`` (a value out of 100).

    Runs ``number_propagations`` rounds of forward propagation and a simple
    gradient update on a single randomly initialised weight, then returns the
    final network output scaled back to the 0-100 range.
    """
    # Random initial weight (odd integer in 1..199, as in the original demo).
    weight = float(2 * (random.randint(1, 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta: scale the miss by the sigmoid derivative at the output.
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 1_00
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive demo: train toward the requested value and print the result.
    # NOTE(review): the inputs are bound to ``__UpperCAmelCase`` but read back
    # as ``expected``/``number_propagations`` — obfuscation damage, confirm
    # against the original source.
    __UpperCAmelCase = int(input("Expected value: "))
    __UpperCAmelCase = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 1
|
import functools
from typing import Any
def A__ ( string, words ):
    """Return True if ``string`` can be segmented into a sequence of ``words``.

    Builds a trie of the word list, then runs a memoised depth-first search
    over string indices (the classic word-break problem).

    Raises:
        ValueError: if ``string`` is not a non-empty str, or ``words`` is not
            a list of non-empty strings.
    """
    # Validation
    if not isinstance(string, str ) or len(string ) == 0:
        raise ValueError('''the string should be not empty string''' )
    if not isinstance(words, list ) or not all(
        isinstance(item, str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )

    # Build trie
    trie = {}
    word_keeper_key = '''WORD_KEEPER'''
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # Mark that a complete word ends at this node.
        trie_node[word_keeper_key] = True

    len_string = len(string )

    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        # Can string[index:] be segmented into words?
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string ):
            trie_node = trie_node.get(string[i], None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False ) and is_breakable(i + 1 ):
                return True
        return False

    return is_breakable(0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a one-time deprecation warning when this legacy module is imported.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 299
| 1
|
from __future__ import annotations
def A__ ( value, weight, capacity ):
    """Greedy fractional-knapsack solver.

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: maximum total weight that fits in the knapsack.

    Returns:
        tuple: ``(max_value, fractions)`` — the best achievable profit and, per
        item, the fraction (0..1) of that item that was taken.
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value, weight )]
    # Consider items in order of decreasing value density.
    index.sort(key=lambda i: ratio[i], reverse=True )

    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # The whole item fits: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the largest fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 299
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """T5-style encoder over note tokens: token + position embeddings, a stack
    of ``TaBlock`` layers, and a final layer norm with dropout.

    NOTE(review): the constructor parameters are positional ``_A``
    placeholders; judging by the ``TaConfig(...)`` keyword call they carry the
    usual T5 hyper-parameters (vocab size, d_model, num heads, d_kv, d_ff,
    dropout rate, feed-forward proj, num layers, ...) — confirm the exact
    ordering against the original source before relying on it.
    """
    @register_to_config
    def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A = False , ) -> List[str]:
        super().__init__()
        # Token embedding followed by a learned position embedding.
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
        # Encoder-only T5 configuration shared by all blocks below.
        SCREAMING_SNAKE_CASE_ = TaConfig(
            vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
        SCREAMING_SNAKE_CASE_ = nn.ModuleList()
        for lyr_num in range(_A ):
            SCREAMING_SNAKE_CASE_ = TaBlock(_A )
            self.encoders.append(_A )
        SCREAMING_SNAKE_CASE_ = TaLayerNorm(_A )
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
    def _UpperCamelCase ( self , _A , _A ) -> Optional[Any]:
        # Embed tokens and add position encodings for positions 0..seq_len-1.
        SCREAMING_SNAKE_CASE_ = self.token_embedder(_A )
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.shape[1]
        SCREAMING_SNAKE_CASE_ = torch.arange(_A , device=encoder_input_tokens.device )
        x += self.position_encoding(_A )
        SCREAMING_SNAKE_CASE_ = self.dropout_pre(_A )
        # inverted the attention mask
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.size()
        SCREAMING_SNAKE_CASE_ = self.get_extended_attention_mask(_A , _A )
        # Run every T5 block, keeping only its hidden-state output.
        for lyr in self.encoders:
            SCREAMING_SNAKE_CASE_ = lyr(_A , _A )[0]
        SCREAMING_SNAKE_CASE_ = self.layer_norm(_A )
        return self.dropout_post(_A ), encoder_inputs_mask
| 299
| 1
|
from __future__ import annotations
import math
def A__ ( depth, node_index, is_max, scores, height ):
    """Minimax over a perfect binary game tree whose leaves are ``scores``.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current player maximises the score.
        scores: leaf values; length must be a power of two.
        height: tree height, i.e. log2(len(scores)).

    Returns:
        The optimal score reachable from this node with both players optimal.

    Raises:
        ValueError: on a negative depth or empty scores.
    """
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    # Leaf level reached: return the stored score.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            A__(depth + 1, node_index * 2, False, scores, height ), A__(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        A__(depth + 1, node_index * 2, True, scores, height ), A__(depth + 1, node_index * 2 + 1, True, scores, height ), )
def A__ ( ):
    """Demo entry point: print the optimal minimax value for a sample tree.

    NOTE(review): calls ``minimax`` by name, matching the de-obfuscated
    original; in this obfuscated file the recursive solver is also named
    ``A__`` — confirm the binding against the original source.
    """
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    # math.log returns a float (3.0 here); depth == height still compares equal.
    height = math.log(len(scores ), 2 )
    print('''Optimal value : ''', end='''''' )
    print(minimax(0, 0, True, scores, height ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined in this obfuscated file (the demo
    # function is named ``A__``) — confirm against the original source.
    main()
| 299
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Processor that bundles a Wav2Vec2 feature extractor and a CTC tokenizer.

    ``__call__`` forwards audio to the feature extractor and text to the
    tokenizer; ``pad`` mirrors that for batching. The deprecated
    ``as_target_processor`` context manager temporarily swaps the current
    processor to the tokenizer for label encoding.
    """
    UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
    UpperCAmelCase_ ="AutoTokenizer"
    def __init__( self , _A , _A ) -> Dict:
        super().__init__(_A , _A )
        # Default processor used for backward-compatible __call__/pad routing.
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
    @classmethod
    def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
        # Fall back to loading the two components individually when the config
        # predates the `tokenizer_class` attribute.
        try:
            return super().from_pretrained(_A , **_A )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , _A , )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
            SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
            return cls(feature_extractor=_A , tokenizer=_A )
    def __call__( self , *_A , **_A ) -> Any:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_A , **_A )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        # Route audio to the feature extractor and text to the tokenizer; when
        # both are given, attach the token ids as labels on the audio inputs.
        if audio is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
            return inputs
    def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_A , **_A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        # Pad features and labels with their respective components; attach the
        # padded label ids when both are present.
        if input_features is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
        if labels is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
            return input_features
    def _UpperCamelCase ( self , *_A , **_A ) -> Any:
        # Delegate decoding straight to the tokenizer.
        return self.tokenizer.batch_decode(*_A , **_A )
    def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
        # Delegate decoding straight to the tokenizer.
        return self.tokenizer.decode(*_A , **_A )
    @contextmanager
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Deprecated: temporarily make the tokenizer the "current processor"
        # so that __call__/pad encode labels instead of audio.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
| 299
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__ ( __lowerCamelCase ):
    """Parse a pytest summary line into ``(failed, passed, time_spent)``.

    Args:
        __lowerCamelCase: a string such as ``"1 failed, 2 passed in 3.45s"``.

    Returns:
        tuple: counts preceding the words "failed"/"passed" summed up, plus
        the trailing duration token.
    """
    expressions = __lowerCamelCase.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def A__ ( __lowerCamelCase ):
    """Map each failing doctest to the first line of its error message.

    Scans pytest's "failures short" output: a header line containing
    ``_ [doctest]`` names the test, and the first following line that does
    not start with a line number is kept as the failure summary.

    Args:
        __lowerCamelCase: the full ``failures_short`` text.

    Returns:
        dict: ``{test_name: first_error_line}``.
    """
    failures = {}
    failure_test = None
    in_error = False
    for line in __lowerCamelCase.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''', line ):
            in_error = True
            # Header format: "_ [doctest] <test name> _..."; keep the name.
            failure_test = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[failure_test] = line
            in_error = False
    return failures
class UpperCamelCase__ :
    """Builds and posts the Slack report for a doc-test CI run.

    NOTE(review): this file looks machine-obfuscated — the
    ``SCREAMING_SNAKE_CASE_ =`` statements in ``__init__`` never bind instance
    attributes, yet later methods read ``self.title``, ``self._time_spent``,
    ``self.n_failures`` etc., and the sort key ``lambda _A : t[0]`` below
    references an undefined ``t``. Compare against the original source before
    relying on runtime behaviour.
    """
    def __init__( self , _A , _A ) -> Dict:
        # Intended bindings, per the attribute reads below: title,
        # _time_spent, n_success, n_failures, n_tests, doc_test_results.
        SCREAMING_SNAKE_CASE_ = title
        SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
        SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
        SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
        SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE_ = doc_test_results
    @property
    def _UpperCamelCase ( self ) -> str:
        # Sum the accumulated "hh:mm:ss" chunks and re-format as "XhYmZs".
        SCREAMING_SNAKE_CASE_ = [self._time_spent]
        SCREAMING_SNAKE_CASE_ = 0
        for time in time_spent:
            SCREAMING_SNAKE_CASE_ = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_A ) == 1:
                SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''
    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Slack header block carrying the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def _UpperCamelCase ( self ) -> Dict:
        # "All green" section block, with a button linking to the Action run.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }
    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Failure-summary section block, with a button linking to the Action run.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }
    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Build a per-category markdown report of failed test names.
        SCREAMING_SNAKE_CASE_ = 40
        SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
        SCREAMING_SNAKE_CASE_ = ''''''
        for category, failures in category_failures.items():
            if len(_A ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_A )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }
    @property
    def _UpperCamelCase ( self ) -> str:
        # Full message payload: header, then either failure blocks or all-green.
        SCREAMING_SNAKE_CASE_ = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_A )
    @staticmethod
    def _UpperCamelCase ( ) -> Any:
        # Post a generic "tests could not run" message to the Slack channel.
        SCREAMING_SNAKE_CASE_ = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_A )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Post the main report and keep the response for threaded replies.
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
        SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )
    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
        # Build the Slack blocks for one job's reply: header, summary, failures.
        SCREAMING_SNAKE_CASE_ = ''''''
        for key, value in failures.items():
            # Truncate very long error messages to keep the message postable.
            SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        SCREAMING_SNAKE_CASE_ = job_name
        SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            SCREAMING_SNAKE_CASE_ = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def _UpperCamelCase ( self ) -> int:
        # Post one threaded reply per failing job, under the main report.
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                SCREAMING_SNAKE_CASE_ = job_result['''failures''']
                SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
                # Throttle to stay under Slack's rate limits.
                time.sleep(1 )
def A__ ( ):
    """Map GitHub Actions job names to their HTML URLs for the current run.

    Reads the run id from the ``GITHUB_RUN_ID`` environment variable and pages
    through the GitHub "list jobs for a workflow run" API (100 jobs per page).

    Returns:
        dict: ``{job_name: html_url}``; empty on any error (best effort).
    """
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        # total_count includes the first page that was already fetched above.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        # Best effort: a malformed API response must not crash the report script.
        print('''Unknown error, could not fetch links.''', e )
        return {}
def A__ ( __lowerCamelCase ):
    """Read every file of a downloaded artifact directory into a dict.

    Args:
        __lowerCamelCase: path of the unpacked artifact directory.

    Returns:
        dict: file contents keyed by bare file name (extension stripped), e.g.
        ``"stats"`` for ``stats.txt``. Empty when the directory does not exist.

    Raises:
        ValueError: when a file cannot be decoded as UTF-8.
    """
    _artifact = {}
    if os.path.exists(__lowerCamelCase ):
        files = os.listdir(__lowerCamelCase )
        for file in files:
            try:
                with open(os.path.join(__lowerCamelCase, file ), encoding='''utf-8''' ) as f:
                    # Key by the bare name so callers can look up e.g. "stats".
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(__lowerCamelCase, file )}.''' ) from e
    return _artifact
def A__ ( ):
    """Scan the current working directory and collect one Artifact per directory.

    Each downloaded CI artifact is unpacked into a directory named after it;
    the returned dict maps that name to an ``Artifact`` holding its path(s).
    """
    class Artifact:
        """Named group of paths belonging to one CI artifact."""

        def __init__(self, name ):
            self.name = name
            self.paths = []

        def __str__(self ):
            return self.name

        def add_path(self, path ):
            self.paths.append({'''name''': self.name, '''path''': path} )

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    # Entry point of the doc-test Slack reporting script: fetch job links and
    # artifacts, parse the pytest output, then post the summary to Slack.
    # NOTE(review): the names used here (get_job_links, retrieve_artifact,
    # handle_test_results, Message, ...) do not match the obfuscated ``A__``
    # definitions above — confirm against the original source.
    __UpperCAmelCase = get_job_links()
    __UpperCAmelCase = retrieve_available_artifacts()
    # Map file-name patterns to human-readable doc-test categories.
    __UpperCAmelCase = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __UpperCAmelCase = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    __UpperCAmelCase = github_actions_job_links.get("run_doctests")
    __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    __UpperCAmelCase = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        # Overall counts and duration come from pytest's "stats" summary line.
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
        __UpperCAmelCase = failed
        __UpperCAmelCase = success
        __UpperCAmelCase = time_spent[1:-1] + ", "
        __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
        # Attribute each FAILED line of the short summary to its category.
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                __UpperCAmelCase = line.replace("FAILED ", "")
                __UpperCAmelCase = line.split()[0].replace("\n", "")
                if "::" in line:
                    __UpperCAmelCase , __UpperCAmelCase = line.split("::")
                else:
                    __UpperCAmelCase , __UpperCAmelCase = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
                        __UpperCAmelCase = failure
                        break
    __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 BPE tokenizer as a Keras layer.

    Wraps keras_nlp's ``BytePairTokenizer`` so tokenization runs inside the
    TF graph; optionally pads token ids up to ``max_length`` with
    ``pad_token_id`` and returns an attention mask alongside the ids.
    """
    def __init__( self , _A , _A , _A = None , _A = None ) -> List[Any]:
        super().__init__()
        # Stored config: pad_token_id, max_length, vocab, merges (per the
        # attribute reads in get_config and call below).
        SCREAMING_SNAKE_CASE_ = pad_token_id
        SCREAMING_SNAKE_CASE_ = max_length
        SCREAMING_SNAKE_CASE_ = vocab
        SCREAMING_SNAKE_CASE_ = merges
        SCREAMING_SNAKE_CASE_ = BytePairTokenizer(_A , _A , sequence_length=_A )
    @classmethod
    def _UpperCamelCase ( cls , _A , *_A , **_A ) -> Any:
        # Build from an existing Python-side GPT-2 tokenizer: extract merges
        # from bpe_ranks and reuse its vocabulary.
        SCREAMING_SNAKE_CASE_ = [''' '''.join(_A ) for m in tokenizer.bpe_ranks.keys()]
        SCREAMING_SNAKE_CASE_ = tokenizer.get_vocab()
        return cls(_A , _A , *_A , **_A )
    @classmethod
    def _UpperCamelCase ( cls , _A , *_A , **_A ) -> Tuple:
        # Load the HF tokenizer from the hub/path, then convert it.
        SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(_A , *_A , **_A )
        return cls.from_tokenizer(_A , *_A , **_A )
    @classmethod
    def _UpperCamelCase ( cls , _A ) -> Tuple:
        # Keras deserialization hook: rebuild from a config dict.
        return cls(**_A )
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Keras serialization hook: everything needed to rebuild this layer.
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def _UpperCamelCase ( self , _A , _A = None ) -> Union[str, Any]:
        # Tokenize in-graph, then optionally pad ids/mask to max length.
        SCREAMING_SNAKE_CASE_ = self.tf_tokenizer(_A )
        SCREAMING_SNAKE_CASE_ = tf.ones_like(_A )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            SCREAMING_SNAKE_CASE_ = max_length if max_length is not None else self.max_length
            if max_length is not None:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pad_model_inputs(
                    _A , max_seq_length=_A , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 299
|
from __future__ import annotations
# Candidate moves (row delta, col delta) used by the grid search below.
__UpperCAmelCase = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def A__ ( grid, init, goal, cost, heuristic, directions=None, ):
    """A*-style search on a 2D grid of free cells (0) and obstacles (1).

    Args:
        grid: 2D list; 0 = free cell, 1 = obstacle.
        init: [row, col] start cell.
        goal: [row, col] target cell.
        cost: step cost added to g for every move.
        heuristic: per-cell estimated remaining distance to the goal.
        directions: optional list of [drow, dcol] moves; defaults to the
            module-level DIRECTIONS (left, down, right, up).

    Returns:
        tuple: ``(path, action)`` — the cells from ``init`` to ``goal`` in
        order, and the action grid recording which move reached each cell.

    Raises:
        ValueError: when the goal is unreachable.
    """
    if directions is None:
        directions = DIRECTIONS
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid: cells already expanded
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid: index of the move that reached each cell
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    while not found:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        # to choose the least costliest action so as to move closer to the goal
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x = next_cell[2]
        y = next_cell[3]
        g = next_cell[1]
        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(directions ) ):  # to try out different valid actions
                xa = x + directions[i][0]
                ya = y + directions[i][1]
                if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                    if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                        ga = g + cost
                        fa = ga + heuristic[xa][ya]
                        cell.append([fa, ga, xa, ya] )
                        closed[xa][ya] = 1
                        action[xa][ya] = i
    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - directions[action[x][y]][0]
        ya = y - directions[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    # Demo: find a path across a sample grid and print the action map + path.
    # NOTE(review): this calls ``search(...)`` and reads ``grid``/``goal`` etc.,
    # but the assignments above bind ``__UpperCAmelCase`` and the function is
    # named ``A__`` — obfuscation damage; confirm against the original source.
    __UpperCAmelCase = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    __UpperCAmelCase = [0, 0]
    # all coordinates are given in format [y,x]
    __UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
    __UpperCAmelCase = 1
    # the cost map which pushes the path closer to the goal
    __UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            __UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __UpperCAmelCase = 99
    __UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 299
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Read-only fsspec filesystem over the files of one Hugging Face repo.

    Builds a directory cache from ``repo_info.siblings`` and resolves file
    opens to authenticated ``hf_hub_url`` downloads.
    """
    UpperCAmelCase_ =""
    UpperCAmelCase_ ="hf-legacy" # "hf://"" is reserved for hffs
    def __init__( self , _A = None , _A = None , **_A , ) -> List[Any]:
        super().__init__(self , **_A )
        # Stored state: repo_info, token, and a lazily-built dir_cache.
        SCREAMING_SNAKE_CASE_ = repo_info
        SCREAMING_SNAKE_CASE_ = token
        SCREAMING_SNAKE_CASE_ = None
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Lazily build the path -> entry cache from the repo's file list,
        # adding synthetic directory entries for every parent path.
        if self.dir_cache is None:
            SCREAMING_SNAKE_CASE_ = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                SCREAMING_SNAKE_CASE_ = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(_A ): {'''name''': str(_A ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _UpperCamelCase ( self , _A , _A = "rb" , **_A , ) -> str:
        # Open a repo file by streaming it from the hub URL with auth headers.
        if not isinstance(self.repo_info , _A ):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        SCREAMING_SNAKE_CASE_ = hf_hub_url(self.repo_info.id , _A , revision=self.repo_info.sha )
        return fsspec.open(
            _A , mode=_A , headers=get_authentication_headers_for_url(_A , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def _UpperCamelCase ( self , _A , **_A ) -> List[Any]:
        # fsspec ``info``: look the path up in the directory cache.
        self._get_dirs()
        SCREAMING_SNAKE_CASE_ = self._strip_protocol(_A )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(_A )
    def _UpperCamelCase ( self , _A , _A=False , **_A ) -> str:
        # fsspec ``ls``: collect cache entries whose parent equals the path.
        self._get_dirs()
        SCREAMING_SNAKE_CASE_ = PurePosixPath(path.strip('''/''' ) )
        SCREAMING_SNAKE_CASE_ = {}
        for p, f in self.dir_cache.items():
            SCREAMING_SNAKE_CASE_ = PurePosixPath(p.strip('''/''' ) )
            SCREAMING_SNAKE_CASE_ = p.parent
            if root == path:
                SCREAMING_SNAKE_CASE_ = f
        SCREAMING_SNAKE_CASE_ = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 299
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def solve(matrix, vector):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Fixes vs. the previous revision: the ``def`` header repeated the same
    parameter name twice (a SyntaxError) while the body read ``matrix`` and
    ``vector``; the function name is restored from the ``solve(...)`` call
    site later in this file.

    Args:
        matrix: square coefficient matrix as a list of rows of numbers.
        vector: right-hand side as a column vector ``[[b0], [b1], ...]``.

    Returns:
        The solution as a column vector, entries rounded to 10 decimal places
        (the rounding removes artifacts like 2.000000000000004).
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest |entry| in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate the column below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear the entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points):
    """Return the polynomial interpolant through ``(1, y_points[0]), (2, y_points[1]), ...``.

    Fixes vs. the previous revision: the locals ``matrix``/``vector``/``coeffs``
    had lost their assignment targets, so ``solve`` was called with the input
    list twice instead of the Vandermonde system; the function name is restored
    from the ``interpolate(...)`` call site later in this file.

    Args:
        y_points: sampled values of the target function at x = 1, 2, ..., n.

    Returns:
        A function evaluating the degree ``n - 1`` interpolating polynomial
        at an integer argument (coefficients rounded to integers).
    """
    size = len(y_points)
    # Vandermonde system: matrix[i][j] = (i + 1) ** (size - j - 1).
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        # Highest-degree coefficient first, matching the matrix layout above.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """Evaluate the Project Euler 101 generating polynomial.

    Computes ``1 - x + x^2 - ... + x^10`` (alternating signs, degree 10).
    Fixes vs. the previous revision: the body read ``variable`` while the
    parameter was named ``__lowerCamelCase`` (a NameError); the function name
    is restored from its use as a default argument later in this file.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func=question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of each truncated interpolant.

    Project Euler 101: for every k-term prefix of the sequence generated by
    ``func``, interpolate a polynomial and add its first value that disagrees
    with ``func``.  Fixes vs. the previous revision: the ``def`` header used
    the same parameter name twice (a SyntaxError) while the body read ``func``
    and ``order``; the function name is restored from the ``solution()`` call
    in the ``__main__`` guard.

    Args:
        func: integer sequence generator, evaluated at 1..order.
        order: number of data points (and of truncated interpolants).

    Returns:
        The sum of the first incorrect terms.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Advance past every point where the interpolant still matches func.
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# NOTE(review): previously every availability-guarded list was assigned to the
# same throwaway module-level name (clobbering the dict) and the final
# _LazyModule call referenced an undefined `_import_structure`.  Restored to
# the standard transformers lazy-import pattern: one dict, extended in place,
# installed into sys.modules.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """PyTorch-Lightning module that fine-tunes a seq2seq model for summarization.

    NOTE(review): this file looks machine-transformed.  Every method below is
    named ``_UpperCamelCase`` (later defs shadow earlier ones), several ``def``
    headers repeat the parameter name ``_A`` (a SyntaxError in Python), and most
    assignments store into the throwaway name ``SCREAMING_SNAKE_CASE_`` instead
    of the instance attributes the later code reads (``self.metrics_save_path``,
    ``self.step_count``, ``self.dataset_kwargs``, ...).  The base class
    ``__SCREAMING_SNAKE_CASE`` is undefined here — presumably the imported
    ``BaseTransformer``; confirm before running.  Comments describe intent only;
    the code is left byte-identical.
    """

    # Class attributes all share one (shadowing) name; they presumably were
    # mode / loss_names / metric_names / default_val_metric — TODO confirm.
    UpperCAmelCase_ ="summarization"
    UpperCAmelCase_ =["loss"]
    UpperCAmelCase_ =ROUGE_KEYS
    UpperCAmelCase_ ="rouge2"

    # Constructor: validates sampler flags, saves git/hparams metadata, and
    # derives dataset/generation settings from the model config.
    def __init__( self , _A , **_A ) -> Tuple:
        # Sortish sampling is incompatible with multi-gpu / dynamic batching.
        if hparams.sortish_sampler and hparams.gpus > 1:
            SCREAMING_SNAKE_CASE_ = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
        use_task_specific_params(self.model , '''summarization''' )
        # Record run metadata next to the outputs for reproducibility.
        save_git_info(self.hparams.output_dir )
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = defaultdict(_A )
        SCREAMING_SNAKE_CASE_ = self.config.model_type
        # fsmt keeps a separate target vocab size.
        SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        SCREAMING_SNAKE_CASE_ = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Per-split example caps; negative means "use all" (mapped to None below).
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        # Per-split max target lengths; train must not exceed val/test.
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
        SCREAMING_SNAKE_CASE_ = hparams.num_workers
        SCREAMING_SNAKE_CASE_ = None  # default to config
        # MBart-style tokenizers need an explicit target-language start token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
            SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = (
            SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
        else:
            SCREAMING_SNAKE_CASE_ = self.model.config.max_length
        SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    # Presumably save_readable_batch: dump one decoded batch to JSON for debugging.
    def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
        SCREAMING_SNAKE_CASE_ = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
        SCREAMING_SNAKE_CASE_ = True
        return readable_batch

    # Presumably forward: delegate to the wrapped transformers model.
    def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
        return self.model(_A , **_A )

    # Presumably ids_to_clean_text: decode token ids and strip whitespace.
    def _UpperCamelCase ( self , _A ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
            _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
        return lmap(str.strip , _A )

    # Presumably _step: teacher-forced forward pass returning a 1-tuple of loss.
    def _UpperCamelCase ( self , _A ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
        SCREAMING_SNAKE_CASE_ = batch['''labels''']
        # T5 shifts labels internally; BART-style models use shift_tokens_right.
        if isinstance(self.model , _A ):
            SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
        else:
            SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            SCREAMING_SNAKE_CASE_ = decoder_input_ids
            self.save_readable_batch(_A )
        SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
        SCREAMING_SNAKE_CASE_ = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
            assert lm_logits.shape[-1] == self.vocab_size
            SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
                _A , _A , self.hparams.label_smoothing , ignore_index=_A )
        return (loss,)

    @property
    def _UpperCamelCase ( self ) -> int:
        # Pad token id shortcut used by the batching statistics below.
        return self.tokenizer.pad_token_id

    # Presumably training_step: compute loss plus token/padding statistics.
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self._step(_A )
        SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
        # tokens per batch
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    # Presumably validation_step: delegate to the generative evaluation step.
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        return self._generative_step(_A )

    # Presumably validation_epoch_end: aggregate per-step outputs into metrics.
    def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
        self.step_count += 1
        SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        SCREAMING_SNAKE_CASE_ = losses['''loss''']
        SCREAMING_SNAKE_CASE_ = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        SCREAMING_SNAKE_CASE_ = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(_A )
        SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
        SCREAMING_SNAKE_CASE_ = self.step_count
        self.metrics[prefix].append(_A )  # callback writes this to self.metrics_save_path
        SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F'''{prefix}_loss''': loss,
            F'''{prefix}_{self.val_metric}''': metric_tensor,
        }

    # Presumably calc_generative_metrics: ROUGE for the summarization task.
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        return calculate_rouge(_A , _A )

    # Presumably _generative_step: generate, decode, and score one eval batch.
    def _UpperCamelCase ( self , _A ) -> dict:
        SCREAMING_SNAKE_CASE_ = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        SCREAMING_SNAKE_CASE_ = self.model.generate(
            batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
        SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
        SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
        SCREAMING_SNAKE_CASE_ = self._step(_A )
        SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
        SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
        SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
        base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
        return base_metrics

    # Presumably test_step: same as validation.
    def _UpperCamelCase ( self , _A , _A ) -> Any:
        return self._generative_step(_A )

    # Presumably test_epoch_end: reuse validation aggregation with prefix "test".
    def _UpperCamelCase ( self , _A ) -> Optional[int]:
        return self.validation_epoch_end(_A , prefix='''test''' )

    # Presumably get_dataset: build the split dataset with the configured caps.
    def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
        SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
        SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
        SCREAMING_SNAKE_CASE_ = self.dataset_class(
            self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
        return dataset

    # Presumably get_dataloader: choose sortish / dynamic / plain sampling.
    def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
        SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                _A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )

    # Presumably train_dataloader (shuffled).
    def _UpperCamelCase ( self ) -> DataLoader:
        SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
        return dataloader

    # Presumably val_dataloader.
    def _UpperCamelCase ( self ) -> DataLoader:
        return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )

    # Presumably test_dataloader.
    def _UpperCamelCase ( self ) -> DataLoader:
        return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )

    # Presumably add_model_specific_args: register all CLI flags on `parser`.
    # NOTE(review): the parameters are both named `_A` (SyntaxError) while the
    # body reads `parser`; confirm the intended signature (parser, root_dir).
    @staticmethod
    def _UpperCamelCase ( _A , _A ) -> Dict:
        BaseTransformer.add_model_specific_args(_A , _A )
        add_generic_args(_A , _A )
        parser.add_argument(
            '''--max_source_length''' , default=1024 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--max_target_length''' , default=56 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=142 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=142 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
        parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
        parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
        parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
        parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
        parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
        parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
        parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
        parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
        parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
        parser.add_argument(
            '''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
        parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
        parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
        parser.add_argument(
            '''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will effect it.'''
            ) , )
        return parser
class UpperCamelCase__ ( UpperCamelCase__ ):
    """Translation variant of the seq2seq fine-tuning module.

    Reuses the summarization training loop defined just above (the base-class
    reference is restored from the undefined ``__SCREAMING_SNAKE_CASE``), swaps
    the task mode and reported metrics, and computes BLEU instead of ROUGE.
    Fixes vs. the previous revision: both method headers repeated the parameter
    name ``_A`` (a SyntaxError); the bodies read ``hparams``/BLEU arguments, so
    those names are restored, and ``calc_generative_metrics`` is named after
    the parent's ``self.calc_generative_metrics(...)`` call site.
    """

    UpperCAmelCase_ = "translation"
    UpperCAmelCase_ = ["loss"]
    UpperCAmelCase_ = ["bleu"]
    UpperCAmelCase_ = "bleu"

    def __init__(self, hparams, **kwargs) -> None:
        super().__init__(hparams, **kwargs)
        # NOTE(review): the original dropped these assignment targets; the
        # parent builds a dataset-kwargs dict, so these presumably record the
        # language pair for dataset construction — confirm against upstream.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(preds, target)
def main(args, model=None):
    """Build (or reuse) the Lightning module, train it, and optionally test.

    Fixes vs. the previous revision: the header named both parameters
    ``__lowerCamelCase`` while the body reads ``args`` and ``model``, and most
    locals (``model``, ``dataset``, ``logger``, ``es_callback``, ``trainer``,
    ``checkpoints``) had lost their assignment targets; they are reconstructed
    from how later lines consume them.  The function name is restored from the
    ``main(args)`` call in the ``__main__`` guard.

    Args:
        args: parsed argparse namespace (see add_model_specific_args).
        model: optional pre-built module; built from ``args.task`` if None.

    Returns:
        The trained Lightning module.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('''/tmp''')
        or str(args.output_dir).startswith('''/var''')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('''WANDB_PROJECT''', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''')
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''')
    if not args.do_predict:
        return model
    # NOTE(review): the original stored the empty string and then
    # checkpoints[-1] twice into a throwaway name; presumably these set the
    # test checkpoint on the hparams and the trainer resume path — confirm
    # against the upstream seq2seq finetune script.
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt'''), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
__UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase = parser.parse_args()
main(args)
| 299
| 1
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect a light ray off the ellipse ``4x^2 + y^2 = 100`` at (point_x, point_y).

    Fixes vs. the previous revision: the header repeated one parameter name
    three times (a SyntaxError) while the body read
    ``point_x``/``point_y``/``incoming_gradient``, and the ``isclose`` call had
    lost its arguments; the name is restored from the ``next_point(...)`` call
    site below.

    Returns:
        (next_x, next_y, outgoing_gradient): the next point where the reflected
        ray hits the ellipse, plus the reflected ray's gradient.
    """
    # Gradient of the (outward) normal at the reflection point.
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) of the normal's angle, used to reflect the ray.
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point: keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections inside the ellipse before the ray escapes (Euler 144).

    The ray enters through the gap ``-0.01 <= x <= 0.01`` at the top; it starts
    at (0, 10.1) heading to (first_x_coord, first_y_coord).  Fixes vs. the
    previous revision: the header used the same parameter name twice (a
    SyntaxError) while the body read ``first_x_coord``/``first_y_coord``; the
    function name is restored from the ``solution()`` call in the guard below.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # Gradient of the initial beam from (0.0, 10.1) to the first point.
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    # Keep reflecting until the ray exits through the top gap.
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# NOTE(review): previously every availability-guarded list was assigned to the
# same throwaway module-level name (clobbering the dict) and the final
# _LazyModule call referenced an undefined `_import_structure`.  Restored to
# the standard transformers lazy-import pattern: one dict, extended in place,
# installed into sys.modules.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# NOTE(review): previously the availability-guarded lists were assigned to the
# same throwaway module-level name and the final _LazyModule call referenced an
# undefined `_import_structure`.  Restored to the standard transformers
# lazy-import pattern: one dict, extended in place, installed into sys.modules.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 299
|
import functools
def A__ ( days, costs ):
    """Minimum cost to cover all travel days with 1-, 7-, and 30-day passes.

    Fixes vs. the previous revision: the header repeated one parameter name
    twice (a SyntaxError) and the ``isinstance`` validation calls had lost
    their arguments; names and checks are restored from the error messages and
    body usage.

    Args:
        days: travel days, each an int in 1..365.
        costs: [cost_of_1_day_pass, cost_of_7_day_pass, cost_of_30_day_pass].

    Returns:
        The minimal total ticket cost covering every day in ``days``.

    Raises:
        ValueError: if ``days`` is not a list of ints, ``costs`` is not three
            ints, or any day is outside 1..365.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('''The parameter days should be a list of integers''')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('''All days elements should be greater than 0''')
    if max(days) >= 3_66:
        raise ValueError('''All days elements should be less than 366''')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost covering every travel day from `index` through day 365.
        if index > 3_65:
            return 0
        if index not in days_set:
            # No travel on this day: skip ahead at no cost.
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``TextStreamer`` / ``TextIteratorStreamer`` used with ``model.generate``.

    NOTE(review): throughout this class, results are assigned to the placeholder
    name ``SCREAMING_SNAKE_CASE_`` while later lines read descriptive names
    (``greedy_ids``, ``streamer``, ``thread``, ...) that are never bound, so the
    tests as written raise NameError at runtime. TODO: restore the original
    variable names.
    """

    def _UpperCamelCase ( self ) -> str:
        # Text printed through TextStreamer must equal the directly decoded greedy output.
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = -1
        SCREAMING_SNAKE_CASE_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        SCREAMING_SNAKE_CASE_ = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            SCREAMING_SNAKE_CASE_ = TextStreamer(_A )
            model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        SCREAMING_SNAKE_CASE_ = cs.out[:-1]
        self.assertEqual(_A , _A )

    def _UpperCamelCase ( self ) -> List[str]:
        # Same comparison, but consuming tokens from TextIteratorStreamer on a thread.
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = -1
        SCREAMING_SNAKE_CASE_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        SCREAMING_SNAKE_CASE_ = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(greedy_ids[0] )
        SCREAMING_SNAKE_CASE_ = TextIteratorStreamer(_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        SCREAMING_SNAKE_CASE_ = Thread(target=model.generate , kwargs=_A )
        thread.start()
        SCREAMING_SNAKE_CASE_ = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(_A , _A )

    def _UpperCamelCase ( self ) -> Dict:
        # skip_prompt=True: the streamed text should exclude the prompt tokens.
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = -1
        SCREAMING_SNAKE_CASE_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        SCREAMING_SNAKE_CASE_ = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        SCREAMING_SNAKE_CASE_ = greedy_ids[:, input_ids.shape[1] :]
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            SCREAMING_SNAKE_CASE_ = TextStreamer(_A , skip_prompt=_A )
            model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        SCREAMING_SNAKE_CASE_ = cs.out[:-1]
        self.assertEqual(_A , _A )

    def _UpperCamelCase ( self ) -> str:
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''distilgpt2''' )
        SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = -1
        SCREAMING_SNAKE_CASE_ = torch.ones((1, 5) , device=_A ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            SCREAMING_SNAKE_CASE_ = TextStreamer(_A , skip_special_tokens=_A )
            model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        SCREAMING_SNAKE_CASE_ = cs.out[:-1]  # Remove the final "\n"
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def _UpperCamelCase ( self ) -> int:
        # A very small timeout on TextIteratorStreamer should raise while iterating.
        SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        SCREAMING_SNAKE_CASE_ = -1
        SCREAMING_SNAKE_CASE_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        SCREAMING_SNAKE_CASE_ = TextIteratorStreamer(_A , timeout=0.001 )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        SCREAMING_SNAKE_CASE_ = Thread(target=model.generate , kwargs=_A )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_A ):
            SCREAMING_SNAKE_CASE_ = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 299
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module-level logger for this test file.
__UpperCAmelCase = logging.get_logger(__name__)
# Force deterministic CUDA/cuDNN kernels so expected-slice tests are reproducible.
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tester for a small 3-channel 32x32 ``UNet2DModel`` with attention blocks.

    NOTE(review): dummy-input properties assign to the placeholder
    ``SCREAMING_SNAKE_CASE_`` but return descriptive names (``noise``,
    ``time_step``) that are never bound — NameError as written. TODO: restore
    the original variable names.
    """

    # Model class under test and the name of its main input.
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Dummy (noise, timestep) pair: batch of 4 RGB 32x32 samples.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Minimal config for a two-stage UNet with one attention down/up block.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tester for a 4-channel (LDM-style) ``UNet2DModel``, including checkpoint
    loading and numeric regression checks against ``fusing/unet-ldm-dummy-update``.

    NOTE(review): results are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while later lines read names (``noise``, ``model``, ``output``, ...) that are
    never bound — NameError as written. TODO: restore the original variable names.
    """

    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Dummy (noise, timestep) pair: batch of 4 latent 4x32x32 samples.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected input shape (C, H, W).
        return (4, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (4, 32, 32)

    def _UpperCamelCase ( self ) -> Tuple:
        # Attention-free two-stage config operating on 4-channel latents.
        SCREAMING_SNAKE_CASE_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    def _UpperCamelCase ( self ) -> Tuple:
        # from_pretrained must report no missing keys and produce an output.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
        # Accelerate and non-accelerate loading paths must agree numerically.
        assert torch_all_close(_A , _A , rtol=1E-3 )

    def _UpperCamelCase ( self ) -> List[str]:
        # Regression check: fixed seed input must reproduce the stored output slice.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tester for the NCSN++ (score-based, Fourier time-embedding) variant of
    ``UNet2DModel``, with slow integration checks against published checkpoints.

    NOTE(review): results are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while later lines read names (``noise``, ``model``, ``output``, ...) that are
    never bound — NameError as written. TODO: restore the original variable names.
    """

    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"

    @property
    def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
        # Dummy (noise, timestep) pair; spatial size is parameterizable.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # Expected output shape (C, H, W).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Four-stage skip-connection config with Fourier time embeddings.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        # Checkpoint loads cleanly and runs on a 256x256 input.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
        SCREAMING_SNAKE_CASE_ = noise
        SCREAMING_SNAKE_CASE_ = model(**_A )
        assert image is not None, "Make sure output is not None"

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        # Regression check on the 256x256 CelebA-HQ checkpoint output slice.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (256, 256)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> int:
        # Regression check on the small FFHQ dummy checkpoint output slice.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Dict:
        # not required for this model
        pass
| 299
| 1
|
# Boilerplate snippet injected at the top of auto-generated notebooks: installs
# transformers/datasets, with a commented-out from-source alternative.
__UpperCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# First notebook cell: the installation snippet above.
# NOTE(review): `INSTALL_CONTENT` is never defined here (everything is assigned
# to `__UpperCAmelCase`) — NameError as written. TODO: restore original names.
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder class names substituted into doc templates when rendering.
__UpperCAmelCase = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 299
|
def A__ ( __lowerCamelCase = 10_00 ):
    """Return the sum of the decimal digits of ``2 ** power``.

    Args:
        __lowerCamelCase: the exponent ``power`` (default 1000, i.e. Project
            Euler problem 16).

    Returns:
        int: sum of the digits of ``2 ** power`` in base 10.

    Fixes: the original assigned every value to the placeholder
    ``SCREAMING_SNAKE_CASE_`` while the loop read ``power``/``n``/``r``,
    which were never bound (NameError).
    """
    n = 2 ** __lowerCamelCase
    r = 0
    # Peel off the least-significant digit until nothing is left.
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    # Fix: the guard called the undefined name `solution`; the function here is `A__`.
    print(A__(int(str(input()).strip())))
| 299
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import heavy optional dependencies only when available.
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class UpperCamelCase__ :
        """Stand-in for ``PIL.Image`` when vision dependencies are missing,
        keeping module-level names importable for test collection."""

        @staticmethod
        def _UpperCamelCase ( *_A , **_A ) -> Optional[int]:
            # No-op placeholder (mirrors Image.open's signature loosely).
            pass
def A__ ( __lowerCamelCase ):
    """Return the MD5 hex digest of an image's raw pixel bytes.

    Used as a cheap fingerprint of pipeline outputs in tests. The argument only
    needs to provide ``tobytes()`` (e.g. a ``PIL.Image.Image``).

    Fixes: ``hashlib.mda`` is not a real attribute (AttributeError at call
    time) — the intended call is ``hashlib.md5`` — and the digest object was
    bound to a placeholder while ``m`` was read.
    """
    m = hashlib.md5(__lowerCamelCase.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Pipeline tests for ``DepthEstimationPipeline`` (small generic checks plus
    a slow integration test against ``Intel/dpt-large``).

    NOTE(review): results are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while later lines read names (``depth_estimator``, ``outputs``, ...) that are
    never bound, and ``hashimage`` is called although the helper above is named
    ``A__`` — NameError as written. TODO: restore original names.
    """

    UpperCAmelCase_ =MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def _UpperCamelCase ( self , _A , _A , _A ) -> Dict:
        # Build the pipeline under test plus two sample image paths.
        SCREAMING_SNAKE_CASE_ = DepthEstimationPipeline(model=_A , image_processor=_A )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def _UpperCamelCase ( self , _A , _A ) -> Optional[int]:
        # Outputs must contain a predicted_depth tensor and a depth image, for
        # single inputs and for batches mixing PIL images, URLs, and file refs.
        SCREAMING_SNAKE_CASE_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _A )
        import datasets

        SCREAMING_SNAKE_CASE_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        SCREAMING_SNAKE_CASE_ = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , _A , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def _UpperCamelCase ( self ) -> List[Any]:
        pass

    @slow
    @require_torch
    def _UpperCamelCase ( self ) -> Any:
        # Integration check against DPT-large's known depth statistics.
        SCREAMING_SNAKE_CASE_ = '''Intel/dpt-large'''
        SCREAMING_SNAKE_CASE_ = pipeline('''depth-estimation''' , model=_A )
        SCREAMING_SNAKE_CASE_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        SCREAMING_SNAKE_CASE_ = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )

    @require_torch
    def _UpperCamelCase ( self ) -> Dict:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 299
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Dummy placeholder object: raises an informative ImportError via
    ``requires_backends`` whenever instantiated or loaded without the
    ``torch`` and ``scipy`` backends installed."""

    # Backends this object needs at runtime.
    UpperCAmelCase_ =["torch", "scipy"]

    def __init__( self , *_A , **_A ) -> Tuple:
        requires_backends(self , ['''torch''', '''scipy'''] )

    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
        # Mirrors `from_config`: fail fast with a helpful backend message.
        requires_backends(cls , ['''torch''', '''scipy'''] )

    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
        # Mirrors `from_pretrained`: fail fast with a helpful backend message.
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , __SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """DeeBERT backbone specialized for RoBERTa: reuses the DeeBert model but
    swaps in RoBERTa-style embeddings."""

    UpperCAmelCase_ =RobertaConfig
    UpperCAmelCase_ ="roberta"

    def __init__( self , _A ) -> int:
        super().__init__(_A )
        # Replace the BERT embeddings installed by the parent with RoBERTa ones.
        SCREAMING_SNAKE_CASE_ = RobertaEmbeddings(_A )
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. " , __SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Sequence classifier over DeeRoBERTa with per-layer "highway" exits.

    NOTE(review): results are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while later lines read names (``outputs``, ``logits``, ``loss``, ...) that
    are never bound — NameError as written. TODO: restore original names.
    """

    UpperCAmelCase_ =RobertaConfig
    UpperCAmelCase_ ="roberta"

    def __init__( self , _A ) -> int:
        super().__init__(_A )
        SCREAMING_SNAKE_CASE_ = config.num_labels
        SCREAMING_SNAKE_CASE_ = config.num_hidden_layers
        # Early-exit-capable encoder plus a standard dropout + linear head.
        SCREAMING_SNAKE_CASE_ = DeeRobertaModel(_A )
        SCREAMING_SNAKE_CASE_ = nn.Dropout(config.hidden_dropout_prob )
        SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(_A )
    def _UpperCamelCase ( self , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , _A=-1 , _A=False , ) -> Dict:
        # Forward pass. A HighwayException signals that generation exited early
        # at an intermediate layer; its payload replaces the full-depth outputs.
        SCREAMING_SNAKE_CASE_ = self.num_layers
        try:
            SCREAMING_SNAKE_CASE_ = self.roberta(
                _A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
            SCREAMING_SNAKE_CASE_ = outputs[1]
            SCREAMING_SNAKE_CASE_ = self.dropout(_A )
            SCREAMING_SNAKE_CASE_ = self.classifier(_A )
            SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            SCREAMING_SNAKE_CASE_ = e.message
            SCREAMING_SNAKE_CASE_ = e.exit_layer
            SCREAMING_SNAKE_CASE_ = outputs[0]

        if not self.training:
            # Track prediction entropy at inference time (used by the exit policy).
            SCREAMING_SNAKE_CASE_ = entropy(_A )
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                SCREAMING_SNAKE_CASE_ = MSELoss()
                SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
                SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )

            # work with highway exits
            SCREAMING_SNAKE_CASE_ = []
            for highway_exit in outputs[-1]:
                SCREAMING_SNAKE_CASE_ = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_A )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    SCREAMING_SNAKE_CASE_ = MSELoss()
                    SCREAMING_SNAKE_CASE_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
                    SCREAMING_SNAKE_CASE_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_A )

            if train_highway:
                SCREAMING_SNAKE_CASE_ = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                SCREAMING_SNAKE_CASE_ = (loss,) + outputs
        if not self.training:
            SCREAMING_SNAKE_CASE_ = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                SCREAMING_SNAKE_CASE_ = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 299
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
    """Config/input factory for the FlaxRegNet model tests below.

    NOTE(review): ``prepare_config_and_inputs`` assigns to the placeholder
    ``SCREAMING_SNAKE_CASE_`` but later reads ``config_and_inputs`` etc., which
    are never bound — NameError as written. TODO: restore original names.
    """

    def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
        # Store hyperparameters used to build small RegNet configs for tests.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = embeddings_size
        SCREAMING_SNAKE_CASE_ = hidden_sizes
        SCREAMING_SNAKE_CASE_ = depths
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = len(_A )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Random pixel inputs plus a matching config.
        SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, pixel_values

    def _UpperCamelCase ( self ) -> Optional[Any]:
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def _UpperCamelCase ( self , _A , _A ) -> int:
        # Base model: final feature map is (B, C_last, H/32, W/32).
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _UpperCamelCase ( self , _A , _A ) -> Any:
        # Classification head: logits are (B, num_labels).
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model tests for FlaxRegNet (config round-trips, forward shapes,
    hidden-state counts, and jitted-vs-eager equivalence)."""

    UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    # RegNet takes images, so these text-model features are disabled.
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False

    def _UpperCamelCase ( self ) -> None:
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Standard config serialization/initialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _UpperCamelCase ( self ) -> str:
        # Placeholder for create_and_test_config_common_properties (intentionally empty).
        return

    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def _UpperCamelCase ( self ) -> int:
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def _UpperCamelCase ( self ) -> Dict:
        pass

    def _UpperCamelCase ( self ) -> List[Any]:
        # The forward signature must start with `pixel_values`.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def _UpperCamelCase ( self ) -> Any:
        # One hidden state per stage plus the stem output.
        def check_hidden_states_output(_A , _A , _A ):
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
            SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
            self.assertEqual(len(_A ) , expected_num_stages + 1 )

        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Jitted and non-jitted forward passes must produce identical shapes.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
                SCREAMING_SNAKE_CASE_ = model_class(_A )

                @jax.jit
                def model_jitted(_A , **_A ):
                    return model(pixel_values=_A , **_A )

                with self.subTest('''JIT Enabled''' ):
                    SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
    """Load and return the COCO cats test fixture as a PIL image.

    Fixes: the image was bound to the placeholder ``SCREAMING_SNAKE_CASE_``
    while ``return image`` read an unbound name (NameError).
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test for FlaxRegNet against the published
    ``facebook/regnet-y-040`` ImageNet classifier.

    NOTE(review): results are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while later lines read names (``model``, ``outputs``, ...) that are never
    bound — NameError as written. TODO: restore original names.
    """

    @cached_property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Only build the processor when vision deps are installed.
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None

    @slow
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        SCREAMING_SNAKE_CASE_ = self.default_image_processor
        SCREAMING_SNAKE_CASE_ = prepare_img()
        SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = model(**_A )
        # verify the logits
        SCREAMING_SNAKE_CASE_ = (1, 1000)
        self.assertEqual(outputs.logits.shape , _A )
        SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
    """Rename a PyTorch state-dict key to Flax style.

    Every ``<name>.<digits>`` segment (e.g. ``layers.0``) becomes
    ``<name>_<digits>`` (``layers_0``), since Flax addresses indexed
    submodules with underscores.

    Fixes: the regex was searched against itself (both ``re.findall``
    arguments were the key), and the loop read names (``pats``, ``key``)
    that were never bound (NameError).
    """
    key = __lowerCamelCase
    pats = re.findall(r"\w+[.]\d+" , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def A__ ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    """Rename (and possibly reshape) one PyTorch weight to its Flax equivalent.

    Args:
        pt_tuple_key: the PyTorch parameter key split into a tuple.
        pt_tensor: the parameter value (anything with ``.ndim``/``.transpose``/``.T``).
        random_flax_state_dict: flattened Flax params used to disambiguate
            renamings (e.g. whether a "weight" is a layer-norm "scale").

    Returns:
        (flax_tuple_key, tensor): the renamed key and the (possibly transposed)
        tensor. Falls through unchanged when no rule applies.

    Fixes: the original assigned every result to one placeholder name and then
    returned ``renamed_pt_tuple_key``/``pt_tensor`` which were never bound
    (NameError), and its signature reused one parameter name three times
    (SyntaxError). Names restored from the body's own references.
    """
    # layer norm: weight/bias -> scale when the Flax tree uses "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding: weight -> embedding (no reshape)
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: 4D weight -> kernel, transposed from OIHW to HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: weight -> kernel, transposed
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def A__ ( pt_state_dict , flax_model , seed=42 ):
    """Convert a PyTorch state dict into the Flax parameter tree of ``flax_model``.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to tensors.
        flax_model: a Flax model exposing ``init_weights(rng)`` used to obtain
            the reference parameter shapes.
        seed: PRNG seed for the reference initialization (default 42).

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
            reference Flax parameter of the same key.

    Fixes: the original assigned all intermediates to one placeholder name and
    then read unbound names (``random_flax_state_dict``, ``flax_key``, ...),
    and its signature reused one parameter name three times (SyntaxError).

    NOTE(review): this calls ``rename_key`` and ``rename_key_and_reshape_tensor``,
    which in this obfuscated file are both defined under the name ``A__`` —
    confirm the sibling helpers' names before running.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(seed ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
| 299
|
from __future__ import annotations

from fractions import Fraction
from math import gcd, isqrt, sqrt
def A__ ( __lowerCamelCase ):
    """Return True when ``number`` (a non-negative int) is a perfect square.

    Fixes: the root was bound to a placeholder while ``sq`` was read
    (NameError). Uses ``math.isqrt`` for an exact integer square root —
    ``int(number**0.5)`` can be off by one for large inputs due to float
    rounding.
    """
    root = isqrt(__lowerCamelCase )
    return __lowerCamelCase == root * root
def A__ ( x_num , x_den , y_num , y_den , z_num , z_den ):
    """Return x/xd + y/yd + z/zd as a reduced ``(numerator, denominator)`` pair.

    Fixes: the original signature reused one parameter name six times
    (SyntaxError) while the body read ``x_num``..``z_den``, and the
    intermediates (``top``, ``bottom``, ``hcf``) were bound to a placeholder
    name. Parameter names restored from the body's own references.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    # Reduce the sum to lowest terms.
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def A__ ( __lowerCamelCase = 35 ):
    """Sum the distinct reduced fractions s = x^n + y^n + z^n (n in {1, 2, -1, -2})
    with 0 < z < 1 and x, y, z proper fractions of denominator <= order, then
    return numerator + denominator of the total (a Project Euler search).

    NOTE(review): values are assigned to the placeholder ``SCREAMING_SNAKE_CASE_``
    while the loops read ``order``, ``hcf``, ``z_num``, ``z_den``, ``total`` and
    call ``add_three`` / ``is_sq`` (the helpers above are both named ``A__``) —
    NameError as written. TODO: restore the original names.
    """
    SCREAMING_SNAKE_CASE_ = set()
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = Fraction(0 )
    SCREAMING_SNAKE_CASE_ = 42
    for x_num in range(1, order + 1 ):
        for x_den in range(x_num + 1, order + 1 ):
            for y_num in range(1, order + 1 ):
                for y_den in range(y_num + 1, order + 1 ):
                    # n=1
                    SCREAMING_SNAKE_CASE_ = x_num * y_den + x_den * y_num
                    SCREAMING_SNAKE_CASE_ = x_den * y_den
                    SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        SCREAMING_SNAKE_CASE_ = add_three(
                            __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                        unique_s.add(__lowerCamelCase )
                    # n=2
                    SCREAMING_SNAKE_CASE_ = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    SCREAMING_SNAKE_CASE_ = x_den * x_den * y_den * y_den
                    # z must itself be rational, so both parts must be perfect squares.
                    if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
                        SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
                        SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
                        SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            SCREAMING_SNAKE_CASE_ = add_three(
                                __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                            unique_s.add(__lowerCamelCase )
                    # n=-1
                    SCREAMING_SNAKE_CASE_ = x_num * y_num
                    SCREAMING_SNAKE_CASE_ = x_den * y_num + x_num * y_den
                    SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        SCREAMING_SNAKE_CASE_ = add_three(
                            __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                        unique_s.add(__lowerCamelCase )
                    # n=2
                    SCREAMING_SNAKE_CASE_ = x_num * x_num * y_num * y_num
                    SCREAMING_SNAKE_CASE_ = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
                        SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
                        SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
                        SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            SCREAMING_SNAKE_CASE_ = add_three(
                                __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                            unique_s.add(__lowerCamelCase )
    for num, den in unique_s:
        total += Fraction(__lowerCamelCase, __lowerCamelCase )
    return total.denominator + total.numerator


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 299
| 1
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
    """Test helper that builds ALBERT configs / dummy inputs and checks per-head output shapes.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    every parameter of ``__init__`` is named ``_A`` (duplicate parameter names
    are a SyntaxError), every local is assigned to the single name
    ``SCREAMING_SNAKE_CASE_`` (each assignment overwrites the previous one)
    while later statements read names such as ``config``, ``input_ids`` or
    ``model`` that are never bound, and all helper methods share the name
    ``_UpperCamelCase`` so only the last definition survives on the class.
    Compare against the upstream ``AlbertModelTester`` before relying on it.
    """

    def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=16 , _A=36 , _A=6 , _A=6 , _A=6 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ) -> List[str]:
        # Store the tester hyper-parameters (batch size, sequence length,
        # model dimensions, label counts, ...) used by the check methods below.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_input_mask
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = embedding_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_hidden_groups
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = scope

    def _UpperCamelCase ( self ) -> Dict:
        # Build random input ids / masks / labels sized by the tester settings.
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCamelCase ( self ) -> Dict:
        # Assemble an AlbertConfig from the stored hyper-parameters.
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Dict:
        # Base model: check last_hidden_state and pooler_output shapes.
        SCREAMING_SNAKE_CASE_ = AlbertModel(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A )
        SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Dict:
        # Pretraining head: MLM logits plus sentence-order-prediction logits.
        SCREAMING_SNAKE_CASE_ = AlbertForPreTraining(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(
            _A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Union[str, Any]:
        # Masked-LM head: per-token vocabulary logits.
        SCREAMING_SNAKE_CASE_ = AlbertForMaskedLM(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> str:
        # QA head: start/end position logits per token.
        SCREAMING_SNAKE_CASE_ = AlbertForQuestionAnswering(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(
            _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[Any]:
        # Sequence-classification head: one logit vector per example.
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = AlbertForSequenceClassification(_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Dict:
        # Token-classification head: per-token label logits.
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = AlbertForTokenClassification(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Optional[int]:
        # Multiple-choice head: inputs are tiled to (batch, num_choices, seq).
        SCREAMING_SNAKE_CASE_ = self.num_choices
        SCREAMING_SNAKE_CASE_ = AlbertForMultipleChoice(config=_A )
        model.to(_A )
        model.eval()
        SCREAMING_SNAKE_CASE_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE_ = model(
            _A , attention_mask=_A , token_type_ids=_A , labels=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _UpperCamelCase ( self ) -> Dict:
        # Repackage prepare_config_and_inputs() output as (config, kwargs-dict).
        # NOTE(review): calls `self.prepare_config_and_inputs()`, which is not
        # defined on this class as written, and unpacks seven values into seven
        # occurrences of the same name — confirm against upstream.
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """ALBERT model test-suite: wires the tester above into the common model/pipeline mixins.

    NOTE(review): the base classes are the undefined name ``__SCREAMING_SNAKE_CASE``
    (upstream these are ``ModelTesterMixin`` / ``PipelineTesterMixin``), the class
    attributes below all share the name ``UpperCAmelCase_`` (only the last
    assignment survives), and every test method is named ``_UpperCamelCase``.
    """

    # Upstream: all_model_classes — model heads exercised by the common tests.
    UpperCAmelCase_ =(
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Upstream: pipeline_model_mapping — pipeline task -> model class.
    UpperCAmelCase_ =(
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase_ =True

    def _UpperCamelCase ( self , _A , _A , _A=False ) -> Tuple:
        # Add dummy label tensors for pretraining models when return_labels is set.
        SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(_A , _A , return_labels=_A )
        if return_labels:
            if model_class in get_values(_A ):
                SCREAMING_SNAKE_CASE_ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A )
                SCREAMING_SNAKE_CASE_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
        return inputs_dict

    def _UpperCamelCase ( self ) -> int:
        # setUp: build the model tester and a config tester.
        SCREAMING_SNAKE_CASE_ = AlbertModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , hidden_size=37 )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Run the shared AlbertConfig sanity checks.
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_A )

    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_A )

    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_A )

    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_A )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Exercise each supported position-embedding type.
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE_ = type
            self.model_tester.create_and_check_model(*_A )

    @slow
    def _UpperCamelCase ( self ) -> int:
        # Smoke-test loading the first published checkpoint.
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ = AlbertModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: albert-base-v2 forward pass against golden hidden-state values."""

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = AlbertModel.from_pretrained('''albert-base-v2''' )
        # Fixed 11-token input and all-ones attention mask.
        SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A )[0]
        # Hidden size of albert-base-v2 is 768.
        SCREAMING_SNAKE_CASE_ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , _A )
        # Compare a 3x3 slice of the hidden states against recorded values.
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) )
| 299
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Holds the (optionally learnable) classifier-free-guidance embeddings for VQ-Diffusion.

    NOTE(review): base classes are the undefined name ``__SCREAMING_SNAKE_CASE``
    (upstream: ``ModelMixin``/``ConfigMixin``); parameters are all named ``_A``
    and the body reads ``learnable``/``hidden_size``/``length``, which those
    parameters never bind — confirm against upstream before use.
    """

    @register_to_config
    def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
        super().__init__()
        SCREAMING_SNAKE_CASE_ = learnable
        if self.learnable:
            # A learnable embedding table needs an explicit shape.
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
        else:
            SCREAMING_SNAKE_CASE_ = None
        # Wrap as a Parameter so it is tracked (and trained when learnable).
        SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """VQ-Diffusion text-to-image pipeline: CLIP text encoder + discrete-latent transformer + VQ-VAE decoder.

    NOTE(review): obfuscation damage as elsewhere in this file — the base class
    name ``__SCREAMING_SNAKE_CASE`` is undefined, the six class attributes below
    all share one name, locals are reassigned to ``SCREAMING_SNAKE_CASE_`` while
    later lines read names they never bind, and ``_A`` placeholders are passed
    where distinct arguments are expected. Documented here as upstream intent.
    """

    # Upstream these are annotated component fields: vqvae, text_encoder,
    # tokenizer, transformer, learned CFG embeddings, scheduler.
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42

    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
        super().__init__()
        # Register all sub-models so device placement / saving works.
        self.register_modules(
            vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
        """Encode the prompt with CLIP; optionally prepend unconditional embeddings for CFG."""
        SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about, then drop, tokens beyond CLIP's context window.
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings.
                SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
            else:
                # Encode the empty prompt as the unconditional branch.
                SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
                SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the reverse diffusion loop over discrete latents and decode with the VQ-VAE."""
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = len(_A )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
        SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
        # CFG only applies when the guidance scale exceeds 1.
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_A )}.''' )
        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The last embedding index is the "masked" token.
            SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            SCREAMING_SNAKE_CASE_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_A , device=self.device )
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
        SCREAMING_SNAKE_CASE_ = latents
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
            if do_classifier_free_guidance:
                # Combine unconditional and conditional log-probs, then re-normalize.
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
                SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
            SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A )
        # Map final latent codes back to image space via the VQ-VAE codebook.
        SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
        # Rescale from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )

    def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
        """Zero out (in log space) classes below the `truncation_rate` cumulative-probability cutoff."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
        SCREAMING_SNAKE_CASE_ = torch.exp(_A )
        SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
        SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class order.
        SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
        SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
        SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
        return rv
| 299
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Feature extractor base for sequence inputs: batch padding/truncation to numpy arrays.

    NOTE(review): same obfuscation damage as the rest of this file — the base
    class name ``__SCREAMING_SNAKE_CASE`` is undefined, all four methods share
    the name ``_UpperCamelCase``, locals are collapsed to one name, and dtype
    names like ``np.floataa``/``np.intaa`` are mangled (upstream:
    ``np.float64``/``np.float32``/``np.int32``). Documented as upstream intent.
    """

    def __init__( self , _A , _A , _A , **_A ) -> Optional[Any]:
        # Store feature dimensionality, audio sampling rate and pad value;
        # padding side and attention-mask behavior come from kwargs.
        SCREAMING_SNAKE_CASE_ = feature_size
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = padding_value
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''padding_side''' , '''right''' )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''return_attention_mask''' , _A )
        super().__init__(**_A )

    def _UpperCamelCase ( self , _A , _A = True , _A = None , _A = False , _A = None , _A = None , _A = None , ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of features; returns a BatchFeature."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(_A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            SCREAMING_SNAKE_CASE_ = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F''' to this method that includes {self.model_input_names[0]}, but you provided'''
                F''' {list(processed_features.keys() )}''' )
        SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
        SCREAMING_SNAKE_CASE_ = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(_A ) == 0:
            # Empty batch: nothing to pad.
            if return_attention_mask:
                SCREAMING_SNAKE_CASE_ = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        SCREAMING_SNAKE_CASE_ = required_input[0]
        if isinstance(_A , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            SCREAMING_SNAKE_CASE_ = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(_A ):
                SCREAMING_SNAKE_CASE_ = required_input[index][0]
        if return_tensors is None:
            # Infer the tensor framework from the first element.
            if is_tf_tensor(_A ):
                SCREAMING_SNAKE_CASE_ = '''tf'''
            elif is_torch_tensor(_A ):
                SCREAMING_SNAKE_CASE_ = '''pt'''
            elif isinstance(_A , (int, float, list, tuple, np.ndarray) ):
                SCREAMING_SNAKE_CASE_ = '''np'''
            else:
                raise ValueError(
                    F'''type of {first_element} unknown: {type(_A )}. '''
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            # Normalize every feature to numpy before padding.
            if isinstance(value[0] , (int, float) ):
                SCREAMING_SNAKE_CASE_ = to_numpy(_A )
            else:
                SCREAMING_SNAKE_CASE_ = [to_numpy(_A ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        SCREAMING_SNAKE_CASE_ = self._get_padding_strategies(padding=_A , max_length=_A )
        SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
        SCREAMING_SNAKE_CASE_ = len(_A )
        if not all(len(_A ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        SCREAMING_SNAKE_CASE_ = []
        for i in range(_A ):
            SCREAMING_SNAKE_CASE_ = {k: v[i] for k, v in processed_features.items()}
            # truncation
            SCREAMING_SNAKE_CASE_ = self._truncate(
                _A , max_length=_A , pad_to_multiple_of=_A , truncation=_A , )
            truncated_inputs.append(_A )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            SCREAMING_SNAKE_CASE_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            SCREAMING_SNAKE_CASE_ = PaddingStrategy.MAX_LENGTH
        SCREAMING_SNAKE_CASE_ = {}
        for i in range(_A ):
            # padding
            SCREAMING_SNAKE_CASE_ = self._pad(
                truncated_inputs[i] , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    SCREAMING_SNAKE_CASE_ = []
                if value.dtype is np.dtype(np.floataa ):
                    SCREAMING_SNAKE_CASE_ = value.astype(np.floataa )
                batch_outputs[key].append(_A )
        return BatchFeature(_A , tensor_type=_A )

    def _UpperCamelCase ( self , _A , _A = None , _A = PaddingStrategy.DO_NOT_PAD , _A = None , _A = None , ) -> dict:
        """Pad a single example (dict of arrays) to `max_length` on `self.padding_side`."""
        SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            SCREAMING_SNAKE_CASE_ = len(_A )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            # Round max_length up to the next multiple.
            SCREAMING_SNAKE_CASE_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        SCREAMING_SNAKE_CASE_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_A ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            SCREAMING_SNAKE_CASE_ = np.ones(len(_A ) , dtype=np.intaa )
        if needs_to_be_padded:
            SCREAMING_SNAKE_CASE_ = max_length - len(_A )
            if self.padding_side == "right":
                if return_attention_mask:
                    SCREAMING_SNAKE_CASE_ = np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                # 2-D features pad along the time axis only.
                SCREAMING_SNAKE_CASE_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                SCREAMING_SNAKE_CASE_ = np.pad(
                    _A , _A , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    SCREAMING_SNAKE_CASE_ = np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                SCREAMING_SNAKE_CASE_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                SCREAMING_SNAKE_CASE_ = np.pad(
                    _A , _A , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return processed_features

    def _UpperCamelCase ( self , _A , _A = None , _A = None , _A = None , ) -> Any:
        """Truncate a single example to at most `max_length` (rounded by `pad_to_multiple_of`)."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
        SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            SCREAMING_SNAKE_CASE_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        SCREAMING_SNAKE_CASE_ = len(_A ) > max_length
        if needs_to_be_truncated:
            SCREAMING_SNAKE_CASE_ = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                SCREAMING_SNAKE_CASE_ = processed_features['''attention_mask'''][:max_length]
        return processed_features

    def _UpperCamelCase ( self , _A=False , _A=None ) -> Optional[int]:
        """Resolve the user-facing `padding` argument to a PaddingStrategy member."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                SCREAMING_SNAKE_CASE_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
            elif not isinstance(_A , _A ):
                SCREAMING_SNAKE_CASE_ = PaddingStrategy(_A )
            elif isinstance(_A , _A ):
                SCREAMING_SNAKE_CASE_ = padding
        else:
            SCREAMING_SNAKE_CASE_ = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
        return padding_strategy
| 299
|
def A__ ( __lowerCamelCase ):
    """Return the aliquot sum (sum of proper divisors) of a positive integer.

    Args:
        __lowerCamelCase: the number whose proper divisors are summed.

    Returns:
        Sum of all divisors of the input strictly smaller than the input
        (0 for input 1, since 1 has no proper divisors).

    Raises:
        ValueError: if the input is not an ``int`` or is not positive.
    """
    # BUG FIX: the original called isinstance(x, x), i.e. passed the value
    # itself as the type argument, which raises TypeError for every non-type
    # input instead of the intended ValueError for non-integers.
    if not isinstance(__lowerCamelCase, int ):
        raise ValueError('''Input must be an integer''' )
    if input_num := __lowerCamelCase:
        pass
    if __lowerCamelCase <= 0:
        raise ValueError('''Input must be positive''' )
    # No proper divisor can exceed n // 2, so scanning up to there suffices.
    return sum(
        divisor for divisor in range(1, __lowerCamelCase // 2 + 1 ) if __lowerCamelCase % divisor == 0 )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 299
| 1
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# NOTE(review): three distinct module constants were collapsed onto one name
# by obfuscation, so only the last assignment (0.01) survives. Upstream these
# are the FeatureDict / ModelOutput type aliases and PICO_TO_ANGSTROM.
__UpperCAmelCase = Mapping[str, np.ndarray]
__UpperCAmelCase = Mapping[str, Any] # Is a nested dict.
__UpperCAmelCase = 0.01
@dataclasses.dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCamelCase__ :
    """Immutable protein structure record (atom positions, residue types, masks, metadata).

    NOTE(review): obfuscation damage — ``__SCREAMING_SNAKE_CASE`` (the `frozen`
    argument) is undefined, and the fields below are plain assignments to the
    single name ``UpperCAmelCase_`` rather than annotated dataclass fields, so
    as written dataclasses sees no fields at all. The original comments below
    describe the intended per-field meaning.
    """

    UpperCAmelCase_ =42 # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    UpperCAmelCase_ =42 # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    UpperCAmelCase_ =42 # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    UpperCAmelCase_ =42 # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    UpperCAmelCase_ =42 # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    UpperCAmelCase_ =None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    UpperCAmelCase_ =None
    # Templates used to generate this protein (prediction-only)
    UpperCAmelCase_ =None
    # Chain corresponding to each parent
    UpperCAmelCase_ =None
def A__ ( __lowerCamelCase ):
    """Parse a ProteinNet-format string ([PRIMARY]/[TERTIARY]/[MASK] sections) into a Protein.

    NOTE(review): obfuscation damage — locals are collapsed onto
    ``SCREAMING_SNAKE_CASE_`` while later lines read ``groups``, ``seq``,
    ``tertiary``, ``tertiary_np``, ``atom_positions``, ``mask``, ``atom_mask``
    and ``aatype``, none of which are ever bound; the single parameter
    ``__lowerCamelCase`` is also passed where distinct arguments belong.
    As written this raises NameError — confirm against the upstream
    OpenFold ``from_proteinnet_string``.
    """
    # Section headers look like "[PRIMARY]\n"; split the text on them.
    SCREAMING_SNAKE_CASE_ = r'''(\[[A-Z]+\]\n)'''
    SCREAMING_SNAKE_CASE_ = [tag.strip() for tag in re.split(__lowerCamelCase, __lowerCamelCase ) if len(__lowerCamelCase ) > 0]
    # Pair each header with the lines of its section body.
    SCREAMING_SNAKE_CASE_ = zip(tags[0::2], [l.split('''\n''' ) for l in tags[1::2]] )
    # Backbone atoms stored in ProteinNet tertiary data.
    SCREAMING_SNAKE_CASE_ = ["N", "CA", "C"]
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Amino-acid sequence -> integer residue types (unknowns become 'X').
            SCREAMING_SNAKE_CASE_ = g[1][0].strip()
            for i in range(len(__lowerCamelCase ) ):
                if seq[i] not in residue_constants.restypes:
                    SCREAMING_SNAKE_CASE_ = '''X''' # FIXME: strings are immutable
            SCREAMING_SNAKE_CASE_ = np.array(
                [residue_constants.restype_order.get(__lowerCamelCase, residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Backbone coordinates: three rows (x, y, z), interleaved per atom.
            SCREAMING_SNAKE_CASE_ = []
            for axis in range(3 ):
                tertiary.append(list(map(__lowerCamelCase, g[1][axis].split() ) ) )
            SCREAMING_SNAKE_CASE_ = np.array(__lowerCamelCase )
            SCREAMING_SNAKE_CASE_ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowerCamelCase ):
                SCREAMING_SNAKE_CASE_ = np.transpose(tertiary_np[:, i::3] )
            # ProteinNet stores picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' = unresolved residue, '+' = resolved; expand to per-atom mask.
            SCREAMING_SNAKE_CASE_ = np.array(list(map({'''-''': 0, '''+''': 1}.get, g[1][0].strip() ) ) )
            SCREAMING_SNAKE_CASE_ = np.zeros(
                (
                    len(__lowerCamelCase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowerCamelCase ):
                SCREAMING_SNAKE_CASE_ = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__lowerCamelCase, atom_mask=__lowerCamelCase, aatype=__lowerCamelCase, residue_index=np.arange(len(__lowerCamelCase ) ), b_factors=__lowerCamelCase, )
def A__ ( __lowerCamelCase, __lowerCamelCase = 0 ):
    """Build PDB header lines (REMARK / PARENT) for one chain of a Protein.

    NOTE(review): obfuscation damage — locals collapse onto one name while
    later lines read ``prot``, ``remark``, ``pdb_headers``, ``parents`` and
    ``chain_id``, which are never bound; upstream this is
    ``get_pdb_headers(prot, chain_id=0)``.
    """
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = prot.remark
    if remark is not None:
        pdb_headers.append(F'''REMARK {remark}''' )
    SCREAMING_SNAKE_CASE_ = prot.parents
    SCREAMING_SNAKE_CASE_ = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        SCREAMING_SNAKE_CASE_ = [p for i, p in zip(__lowerCamelCase, __lowerCamelCase ) if i == chain_id]
    if parents is None or len(__lowerCamelCase ) == 0:
        SCREAMING_SNAKE_CASE_ = ['''N/A''']
    pdb_headers.append(F'''PARENT {" ".join(__lowerCamelCase )}''' )
    return pdb_headers
def A__ ( __lowerCamelCase, __lowerCamelCase ):
    """Rewrite an existing PDB string, inserting per-chain REMARK/PARENT header lines.

    NOTE(review): obfuscation damage — locals collapse onto one name while
    later lines read ``pdb_str``, ``prot``, ``out_pdb_lines``,
    ``parent_dict``, ``parents_per_chain``, ``chain_counter`` and ``lines``,
    which are never bound; upstream this is ``add_pdb_headers(prot, pdb_str)``.
    """
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = pdb_str.split('''\n''' )
    SCREAMING_SNAKE_CASE_ = prot.remark
    if remark is not None:
        out_pdb_lines.append(F'''REMARK {remark}''' )
    SCREAMING_SNAKE_CASE_ = 42
    if prot.parents is not None and len(prot.parents ) > 0:
        SCREAMING_SNAKE_CASE_ = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index; chains with no parents get "N/A".
            SCREAMING_SNAKE_CASE_ = {}
            for p, i in zip(prot.parents, prot.parents_chain_index ):
                parent_dict.setdefault(str(__lowerCamelCase ), [] )
                parent_dict[str(__lowerCamelCase )].append(__lowerCamelCase )
            SCREAMING_SNAKE_CASE_ = max([int(__lowerCamelCase ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                SCREAMING_SNAKE_CASE_ = parent_dict.get(str(__lowerCamelCase ), ['''N/A'''] )
                parents_per_chain.append(__lowerCamelCase )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        SCREAMING_SNAKE_CASE_ = [['''N/A''']]

    def make_parent_line(__lowerCamelCase ) -> str:
        # Format one PARENT record from a list of parent ids.
        return F'''PARENT {" ".join(__lowerCamelCase )}'''

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    SCREAMING_SNAKE_CASE_ = 0
    for i, l in enumerate(__lowerCamelCase ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(__lowerCamelCase )
        # Each TER (except the final one before END) starts the next chain,
        # which gets its own PARENT line.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(__lowerCamelCase ):
                SCREAMING_SNAKE_CASE_ = parents_per_chain[chain_counter]
            else:
                SCREAMING_SNAKE_CASE_ = ['''N/A''']
            out_pdb_lines.append(make_parent_line(__lowerCamelCase ) )
    return "\n".join(__lowerCamelCase )
def A__ ( __lowerCamelCase ):
    """Render a Protein object as a PDB-format string.

    Fix: every local binding in the original was mangled to
    ``SCREAMING_SNAKE_CASE_`` while the reads used the original names, and
    ``np.intaa`` (``np.int32``) did not exist; names are restored from the
    use sites. Fixed-width column spacing follows the canonical PDB ATOM/TER
    record layout.

    NOTE(review): this calls ``get_pdb_headers`` and reads module-level
    ``residue_constants``; both must exist in this module (the header helper
    above was renamed by the mangling) — confirm the bindings.

    Args:
        __lowerCamelCase: Protein-like object (atom_positions, atom_mask,
            aatype, residue_index, b_factors, optional chain_index).

    Returns:
        str: PDB text terminated by an END record and a trailing newline.

    Raises:
        ValueError: if any aatype index exceeds ``restype_num``.
    """
    prot = __lowerCamelCase
    restypes = residue_constants.restypes + ['''X''']
    def res_atoa(r ) -> str:
        # Map a residue-type index to its 3-letter code; unknown -> UNK.
        return residue_constants.restype_atoa.get(restypes[r], '''UNK''' )
    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_a = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = '''ATOM'''
            name = atom_name if len(atom_name ) == 4 else F''' {atom_name}'''
            alt_loc = ''''''
            insertion_code = ''''''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''''''
            chain_tag = '''A'''
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
                F'''{res_name_a:>3} {chain_tag:>1}'''
                F'''{residue_index[i]:>4}{insertion_code:>1}   '''
                F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
                F'''{occupancy:>6.2f}{b_factor:>6.2f}          '''
                F'''{element:>2}{charge:>2}'''
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = '''TER'''
            chain_termination_line = (
                F'''{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(pdb_lines )
def A__ ( __lowerCamelCase ):
    """Return the standard per-residue atom-existence mask for a protein.

    Fix: the body read ``prot`` while the parameter was named
    ``__lowerCamelCase`` (a NameError at call time).

    Args:
        __lowerCamelCase: Protein-like object with an ``aatype`` array.

    Returns:
        The rows of ``residue_constants.STANDARD_ATOM_MASK`` selected by
        ``aatype``.
    """
    prot = __lowerCamelCase
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A__ ( features, result, b_factors = None, chain_index = None, remark = None, parents = None, parents_chain_index = None, ):
    """Assemble a ``Protein`` from model features and a prediction result.

    Fix: the original declared seven identically named parameters (a
    SyntaxError); names are restored from their use sites in the body.

    Args:
        features: mapping with ``aatype`` and ``residue_index`` arrays.
        result: mapping with ``final_atom_positions`` / ``final_atom_mask``.
        b_factors: optional per-atom B-factors (defaults to zeros).
        chain_index / remark / parents / parents_chain_index: passed through.

    Returns:
        A ``Protein`` instance (``residue_index`` is shifted to 1-based).
    """
    return Protein(
        aatype=features['''aatype'''],
        atom_positions=result['''final_atom_positions'''],
        atom_mask=result['''final_atom_mask'''],
        residue_index=features['''residue_index'''] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index, )
| 299
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework requested from tokenizers in the tests below.
# Fix: the original assigned a mangled name (`__UpperCAmelCase`) here while
# the test body reads `FRAMEWORK`, which was therefore undefined.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test-suite for ByT5 (byte-level tokenization).

    NOTE(review): this class appears machine-mangled — every method is named
    ``_UpperCamelCase``, several signatures repeat the parameter ``_A``
    (a SyntaxError), and locals are assigned to ``SCREAMING_SNAKE_CASE_``
    but read back under their original names (NameErrors). The code is left
    byte-identical here; restore the original identifiers before running.
    """
    # Tokenizer class under test and the rust-tokenizer flag.
    UpperCAmelCase_ =ByTaTokenizer
    UpperCAmelCase_ =False
    # setUp: create a fresh ByT5 tokenizer and persist it to the temp dir.
    def _UpperCamelCase ( self ) -> Tuple:
        super().setUp()
        SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _UpperCamelCase ( self ) -> List[str]:
        # Reference tokenizer fetched from the Hub (cached per instance).
        return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
    def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
        # Reload the tokenizer saved in setUp, forwarding any kwargs.
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        SCREAMING_SNAKE_CASE_ = []
        for i in range(len(_A ) ):
            try:
                SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
        if max_length is not None and len(_A ) > max_length:
            SCREAMING_SNAKE_CASE_ = toks[:max_length]
        if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
            while len(_A ) < min_length:
                SCREAMING_SNAKE_CASE_ = toks + toks
        # toks_str = [t[1] for t in toks]
        SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
        # Ensure consistency
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
        if " " not in output_txt and len(_A ) > 1:
            SCREAMING_SNAKE_CASE_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
            )
        if with_prefix_space:
            SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
        return output_txt, output_ids
    # Appending </s> explicitly must give the same ids as the bare text.
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
    # Round-trip encode/decode of multi-byte (non-ASCII) characters.
    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
        SCREAMING_SNAKE_CASE_ = tokenizer(_A )
        SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''Unicode €.</s>''' )
        SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
        SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''e è é ê ë</s>''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
    # Batch encoding with padding returns the expected tensor shapes.
    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        self.assertIsInstance(_A , _A )
        if FRAMEWORK != "jax":
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
        else:
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_A , _A )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    # Plain (non-target) calls must not produce decoder inputs.
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , _A )
        self.assertIn('''attention_mask''' , _A )
        self.assertNotIn('''decoder_input_ids''' , _A )
        self.assertNotIn('''decoder_attention_mask''' , _A )
    # text_target with max_length padding/truncation yields that length.
    def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer(
            text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    # Joint text + text_target call produces both input_ids and labels.
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
        SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
        self.assertEqual(_A , batch['''input_ids'''][0] )
        self.assertEqual(_A , batch['''labels'''][0] )
    # Save/reload round-trips, including added special tokens.
    def _UpperCamelCase ( self ) -> Dict:
        # safety check on max_len default value so we are sure the test works
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                shutil.rmtree(_A )
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_A )
    # additional_special_tokens survive JSON round-trips and can be overridden
    # via from_pretrained kwargs.
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , additional_special_tokens=_A , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    # Decoding token id 255 (an invalid lone utf-8 byte) yields "".
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
                self.assertTrue(tokenizer.decode([255] ) == '''''' )
    # The following common tests do not apply to ByT5 and are skipped.
    def _UpperCamelCase ( self ) -> int:
        pass
    def _UpperCamelCase ( self ) -> Any:
        pass
    def _UpperCamelCase ( self ) -> Any:
        pass
    def _UpperCamelCase ( self ) -> Optional[int]:
        pass
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
                self.assertIsInstance(_A , _A )
    # Special-token id setters round-trip through both name and name_id.
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                SCREAMING_SNAKE_CASE_ = 0
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
                    _A , skip_special_tokens=_A )
                for attr in attributes_list:
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                setattr(_A , '''additional_special_tokens_ids''' , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
                setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 299
| 1
|
def A__ ( __lowerCamelCase ):
    """Count the divisors of a positive integer via prime factorization.

    Fix: the original assigned every local to ``SCREAMING_SNAKE_CASE_`` while
    the body read ``n_divisors``, ``i`` and ``multiplicity`` (NameErrors);
    names are restored from the use sites.

    Args:
        __lowerCamelCase: positive integer ``n``.

    Returns:
        int: the number of divisors of ``n`` (d(1) == 1).
    """
    n = __lowerCamelCase
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        # d(n) is the product of (exponent + 1) over the prime factorization.
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with exponent 1.
        n_divisors *= 2
    return n_divisors
def A__ ( __lowerCamelCase = 5_00 ):
    """Project Euler 12: first triangle number with more than N divisors.

    Fixes: the original called ``count_divisors``, which does not exist under
    that name in this file (the helper above was renamed), and its locals
    were mangled. The divisor counter is inlined here so the function is
    self-contained. The divisor threshold is also generalized to a parameter
    (default 500, preserving the original behavior).

    Args:
        __lowerCamelCase: divisor-count threshold (exclusive).

    Returns:
        int: the smallest triangle number with more than the threshold
        number of divisors.
    """
    def _count_divisors(n ):
        # Number of divisors via trial-division prime factorization.
        n_divisors = 1
        i = 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            n_divisors *= multiplicity + 1
            i += 1
        if n > 1:
            n_divisors *= 2
        return n_divisors
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i  # t_num is the i-th triangle number
        if _count_divisors(t_num ) > __lowerCamelCase:
            break
    return t_num
if __name__ == "__main__":
    # Fix: the original called `solution()`, a name that does not exist in
    # this file (the solver above is bound to A__).
    print(A__())
| 299
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A__ ( __lowerCamelCase ):
# getting number of pixels in the image
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
    # Fix: the original read the undefined names `convert_to_negative` and
    # `img`; the converter in this file is bound to A__ and the image is
    # held in `__UpperCAmelCase`.
    # read original image
    __UpperCAmelCase = imread("image_data/lena.jpg", 1)
    # convert to its negative
    __UpperCAmelCase = A__(__UpperCAmelCase)
    # show result image
    imshow("negative of original image", __UpperCAmelCase)
    waitKey(0)
    destroyAllWindows()
| 299
| 1
|
def A__ ( a, b ):
    """Greatest common divisor by the Euclidean algorithm.

    Fix: the original declared two identically named parameters (a
    SyntaxError) and mangled the tuple-assignment targets; names are
    restored from the use sites.

    Args:
        a, b: integers.

    Returns:
        int: gcd(a, b); gcd(0, b) == b.
    """
    while a != 0:
        a, b = b % a, a
    return b
def A__ ( a, m ):
    """Modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm. Fixes: the original declared two
    identically named parameters (a SyntaxError), collapsed the six
    Bézout-coefficient variables into one mangled name, and called a sibling
    ``gcd`` that no longer exists under that name (replaced by ``math.gcd``).

    Args:
        a: value to invert.
        m: modulus.

    Returns:
        int: x in [0, m) such that (a * x) % m == 1.

    Raises:
        ValueError: if gcd(a, m) != 1 (no inverse exists).
    """
    from math import gcd  # local import: the original sibling helper's name was mangled away
    if gcd(a, m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    # (ua, ub, uc) and (va, vb, vc) track Bezout coefficients for a and m.
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
| 299
|
import math
def A__ ( __lowerCamelCase ):
    """Return True if the integer admits a "perfect" partition.

    An integer p is perfect here iff sqrt(4p + 1)/2 + 1/2 is an exact power
    of two, i.e. its base-2 logarithm is a whole number.

    Fixes: the original called the nonexistent ``math.loga`` (mangled
    ``math.log2``) and read ``positive_integer`` while the parameter was
    mangled.

    Args:
        __lowerCamelCase: positive integer to test.

    Returns:
        bool: whether the partition count is a power of two.
    """
    positive_integer = __lowerCamelCase
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
    """Project Euler 207: smallest m with perfect-partition proportion below a bound.

    Iterates candidate partitions m = (k^2 - 1) / 4 (integral exactly for odd
    k) and returns the first m at which the running proportion of perfect
    partitions drops strictly below ``max_proportion``.

    Fixes: the original's locals were mangled, and it called
    ``check_partition_perfect``, which no longer exists under that name
    (the checker above was renamed); the check is inlined here.

    Args:
        __lowerCamelCase: proportion threshold (default 1/12345).

    Returns:
        int: the first qualifying partition value.
    """
    max_proportion = __lowerCamelCase
    def _is_perfect_partition(positive_integer ):
        # Perfect iff sqrt(4p + 1)/2 + 1/2 is an exact power of two.
        exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
        return exponent == int(exponent )
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if _is_perfect_partition(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
    # Fix: the original referenced `solution()`, a name that does not exist
    # in this file (the solver above is bound to A__).
    print(F"""{A__() = }""")
| 299
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Processor tests for InstructBLIP (image processor + two tokenizers).

    NOTE(review): this class appears machine-mangled — every method is named
    ``_UpperCamelCase`` and locals are assigned to ``SCREAMING_SNAKE_CASE_``
    but read back under their original names (NameErrors at runtime). The
    code is left byte-identical here; restore the original identifiers
    before running.
    """
    # setUp: build a processor from tiny components and save it to a temp dir.
    def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
        SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        SCREAMING_SNAKE_CASE_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(_A , _A , _A )
        processor.save_pretrained(self.tmpdirname )
    # Component accessors: reload each piece from the saved processor dir.
    def _UpperCamelCase ( self , **_A ) -> List[str]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer
    def _UpperCamelCase ( self , **_A ) -> Any:
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
    def _UpperCamelCase ( self , **_A ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).qformer_tokenizer
    def _UpperCamelCase ( self ) -> Dict:
        shutil.rmtree(self.tmpdirname )
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # One random 30x400 RGB image as a PIL fixture.
        SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # Save/reload round-trip preserves component types and configuration.
    def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )
        self.assertIsInstance(processor.qformer_tokenizer , _A )
    # Image path: processor output matches the bare image processor's.
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # Text path: both tokenizers' encodings appear in the processor output.
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
        SCREAMING_SNAKE_CASE_ = '''lower newer'''
        SCREAMING_SNAKE_CASE_ = processor(text=_A )
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , return_token_type_ids=_A )
        SCREAMING_SNAKE_CASE_ = qformer_tokenizer(_A , return_token_type_ids=_A )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
    # Joint text + image call yields all expected keys; empty call raises.
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
        SCREAMING_SNAKE_CASE_ = '''lower newer'''
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = processor(text=_A , images=_A )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
        # test if it raises when no input is passed
        with pytest.raises(_A ):
            processor()
    # batch_decode delegates to the main tokenizer.
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
        SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        SCREAMING_SNAKE_CASE_ = processor.batch_decode(_A )
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_A )
        self.assertListEqual(_A , _A )
    # model_input_names: joint call produces the full expected key set.
    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
        SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
            tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
        SCREAMING_SNAKE_CASE_ = '''lower newer'''
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = processor(text=_A , images=_A )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 299
|
def A__ ( __lowerCamelCase ):
    """Convert an infix expression to postfix (RPN), printing each step.

    Fix: the original assigned every local to ``SCREAMING_SNAKE_CASE_`` while
    reading ``stack``, ``post_fix``, ``priority`` and ``print_width``
    (NameErrors); names are restored from the use sites.

    Args:
        __lowerCamelCase: infix expression of single-character operands
            (letters/digits), operators ^ * / % + -, and parentheses.

    Returns:
        str: the postfix expression.
    """
    infix = __lowerCamelCase
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def A__ ( __lowerCamelCase ):
    """Convert an infix expression to prefix notation.

    Reverses the input (swapping parentheses), converts to postfix, then
    reverses the result. Fix: the original assigned the swapped parentheses
    to a throwaway local instead of back into ``infix[i]`` (a no-op).

    NOTE(review): this calls ``infix_2_postfix``, which is not defined under
    that name in this file (the converter above was renamed by the
    mangling) — confirm the intended helper binding.

    Args:
        __lowerCamelCase: infix expression string.

    Returns:
        str: the prefix expression.
    """
    infix = list(__lowerCamelCase[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # Fix: the original assigned the input to a mangled name while reading
    # `Infix`, which was therefore undefined.
    # NOTE(review): `infix_2_prefix` is not defined under that name in this
    # file (the converter above was renamed) — confirm the binding.
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # Path to a tiny SentencePiece model used as the test vocabulary.
    # NOTE(review): the mangled name `__UpperCAmelCase` is rebound three
    # times in this module (originally three distinct constants, e.g. a
    # sample-vocab path and two language-code ids); only the last binding
    # survives — restore the original names before use.
    __UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__UpperCAmelCase = 12_80_22
__UpperCAmelCase = 12_80_28
@require_sentencepiece
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =MaMaaaTokenizer
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =True
def _UpperCamelCase ( self ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ = Path(self.tmpdirname )
save_json(_A , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_A , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
SCREAMING_SNAKE_CASE_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self , **_A ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , _A ) -> Dict:
return (
"This is a test",
"This is a test",
)
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = '''</s>'''
SCREAMING_SNAKE_CASE_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(_A ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2, 3, 4, 5, 6] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
self.assertEqual(_A , '''This is a test''' )
    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        """Integration test: pinned input_ids/attention_mask for a fixed M2M100 checkpoint revision."""
        # fmt: off
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration tests for the real facebook/m2m100_418M tokenizer (en -> fr)."""
    # Pinned checkpoint and bilingual sample pairs used by every test below.
    UpperCAmelCase_ ="facebook/m2m100_418M"
    UpperCAmelCase_ =[
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    UpperCAmelCase_ =[
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    # Expected ids for the first source sentence, prefixed with the EN language code.
    UpperCAmelCase_ =[EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    @classmethod
    def _UpperCamelCase ( cls ) -> int:
        """Class-level fixture: load the tokenizer once for all tests."""
        SCREAMING_SNAKE_CASE_ = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        SCREAMING_SNAKE_CASE_ = 1
        return cls
    def _UpperCamelCase ( self ) -> Any:
        """Language codes map to their fixed ids at the top of the vocab."""
        self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
        self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
        self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
        self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Vocabulary size is consistent and contains the language tokens."""
        SCREAMING_SNAKE_CASE_ = self.tokenizer.get_vocab()
        self.assertEqual(len(_A ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , _A )
    def _UpperCamelCase ( self ) -> Any:
        """Encoding the first source sentence reproduces the pinned id list."""
        SCREAMING_SNAKE_CASE_ = '''en'''
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _A )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(_A , self.tokenizer.all_special_ids )
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_A , skip_special_tokens=_A )
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
        self.assertEqual(_A , _A )
        self.assertNotIn(self.tokenizer.eos_token , _A )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """save_pretrained / from_pretrained preserves the language-token map."""
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(_A )
        SCREAMING_SNAKE_CASE_ = MaMaaaTokenizer.from_pretrained(_A )
        self.assertDictEqual(new_tok.lang_token_to_id , _A )
    @require_torch
    def _UpperCamelCase ( self ) -> List[Any]:
        """Batch encoding places language codes / EOS correctly for src and tgt."""
        SCREAMING_SNAKE_CASE_ = '''en'''
        SCREAMING_SNAKE_CASE_ = '''fr'''
        SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        # NOTE(review): the .tolist() result below is assigned to a throwaway
        # name and never written back into ``batch`` (the commented-out dict
        # comprehension shows the intent) — machine-garbled; verify upstream.
        for k in batch:
            SCREAMING_SNAKE_CASE_ = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Switching tgt_lang updates prefix (language code) and suffix (EOS)."""
        SCREAMING_SNAKE_CASE_ = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        SCREAMING_SNAKE_CASE_ = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    @require_torch
    def _UpperCamelCase ( self ) -> List[Any]:
        """Target/input mode switching toggles between tgt and src language codes."""
        SCREAMING_SNAKE_CASE_ = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        SCREAMING_SNAKE_CASE_ = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
    @require_torch
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """Translation inputs carry the src code in input_ids and force the tgt BOS."""
        SCREAMING_SNAKE_CASE_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(_A ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[128022, 58, 4183, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 128006,
            } , )
| 299
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP-style audio feature extractor.

    Computes log-mel spectrograms from raw mono audio and, for clips longer
    than the maximum length, either randomly crops ("rand_trunc") or builds a
    4-channel "fusion" stack of downsampled + chunked mels. Shorter clips are
    padded (optionally by repetition) up to the maximum length.
    """
    UpperCAmelCase_ =["input_features", "is_longer"]
    # NOTE(review): every parameter below was machine-renamed to ``_A`` —
    # duplicate parameter names are a SyntaxError; restore the original
    # signature (feature_size, sampling_rate, hop_length, max_length_s,
    # fft_window_size, padding_value, return_attention_mask, frequency_min,
    # frequency_max, top_db, truncation, padding) before running.
    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a one-sided FFT: (window >> 1) + 1.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks: HTK-scaled (used for "fusion") and
        # Slaney-normalized (used for the rand_trunc / padded paths).
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        """Serialize the config, dropping the (large, derivable) mel filter banks."""
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        """Compute a (frames, n_mels) log-mel (dB) spectrogram for one waveform."""
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        """Build the 4-channel fusion mel: a bilinear-downsampled full mel plus
        one random chunk from the front, middle and back thirds of the clip."""
        # Candidate start indices, split into three thirds of the clip.
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        # Downsample the full mel to chunk_frames x 64 with bilinear interpolation.
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        """Truncate/pad one waveform to max_length and return (mel, is_longer)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                # Short clip in fusion mode: replicate the single mel 4x.
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        """Featurize raw speech (single clip or batch) into a BatchFeature with
        ``input_features`` and ``is_longer``; validates the sampling rate."""
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
| 1
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Deprecated shim that forwards everything to ``BeitImageProcessor``."""
    # NOTE(review): ``*_A , **_A`` reuses the same name for both parameter
    # kinds (a SyntaxError), and ``_A`` is passed as the warning category —
    # machine-garbled; upstream passes ``FutureWarning`` there. Verify before use.
    def __init__( self , *_A , **_A ) -> None:
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
| 299
|
import math
import random
def A__ ( __lowerCamelCase, deriv = False ):
    """Sigmoid activation 1 / (1 + e^-x).

    With ``deriv=True``, returns the sigmoid derivative expressed in terms of
    a sigmoid OUTPUT value: v * (1 - v).

    Fix: both parameters were previously named ``__lowerCamelCase``, which is
    a SyntaxError; the flag is renamed ``deriv`` (positional calls unaffected).
    """
    if deriv:
        return __lowerCamelCase * (1 - __lowerCamelCase)
    return 1 / (1 + math.exp(-__lowerCamelCase))
# Initial Value
# NOTE(review): the training loop below was written against a constant named
# ``INITIAL_VALUE`` (learning rate / input scale); it was machine-renamed to
# ``__UpperCAmelCase`` here — confirm the intended name before running.
__UpperCAmelCase = 0.02
def A__ ( expected, number_propagations, learning_rate=0.02 ):
    """Fit a single weight by gradient descent so that
    sigmoid(learning_rate * weight) * 100 approaches ``expected``.

    Args:
        expected: target value on a 0-100 scale.
        number_propagations: number of gradient-descent iterations (>= 0).
        learning_rate: scale for both the neuron input and the weight update
            (new, backward-compatible parameter; previously the hard-coded
            module constant 0.02).

    Returns:
        The final neuron output, scaled back to 0-100.

    Fixes in this revision (the previous version could not run):
    - both parameters were named ``__lowerCamelCase`` (a SyntaxError);
    - every local was assigned to the one name ``SCREAMING_SNAKE_CASE_`` while
      later lines read ``weight`` / ``layer_a`` (NameErrors);
    - ``sigmoid_function`` and ``INITIAL_VALUE`` are not defined in this file,
      so a local sigmoid helper is used instead;
    - the derivative call passed the function's own arguments instead of the
      layer output with ``deriv=True``.
    """
    if number_propagations < 0:
        raise ValueError("number_propagations must be non-negative")

    def _sigmoid(value, deriv=False):
        # Local sigmoid so this function is self-contained.
        if deriv:
            return value * (1 - value)
        return 1 / (1 + math.exp(-value))

    # Random odd starting weight in [1, 199], as in the original script.
    weight = float(2 * random.randint(1, 100) - 1)
    # Compute once up front so number_propagations == 0 is well-defined.
    layer_1 = _sigmoid(learning_rate * weight)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = _sigmoid(learning_rate * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * _sigmoid(layer_1, deriv=True)
        # Update weight
        weight += learning_rate * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``forward_propagation``, ``expected`` and
    # ``number_propagations`` are not defined — the trainer above is named
    # ``A__`` and both inputs are assigned to ``__UpperCAmelCase``
    # (machine-garbled); restore the original names before running.
    __UpperCAmelCase = int(input("Expected value: "))
    __UpperCAmelCase = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 1
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Abstract base for dataset readers that load from one or more paths.

    Stores common loading options; subclasses implement ``read`` to return a
    (Iterable)Dataset(Dict).
    """
    # NOTE(review): every parameter was machine-renamed to ``_A`` — duplicate
    # parameter names are a SyntaxError; the bodies read the original names
    # (path_or_paths, split, features, cache_dir, keep_in_memory, streaming,
    # num_proc). Restore before running.
    def __init__( self , _A = None , _A = None , _A = None , _A = None , _A = False , _A = False , _A = None , **_A , ) -> int:
        SCREAMING_SNAKE_CASE_ = path_or_paths
        # Default to the "train" split unless an explicit split is given.
        SCREAMING_SNAKE_CASE_ = split if split or isinstance(_A , _A ) else '''train'''
        SCREAMING_SNAKE_CASE_ = features
        SCREAMING_SNAKE_CASE_ = cache_dir
        SCREAMING_SNAKE_CASE_ = keep_in_memory
        SCREAMING_SNAKE_CASE_ = streaming
        SCREAMING_SNAKE_CASE_ = num_proc
        SCREAMING_SNAKE_CASE_ = kwargs
    @abstractmethod
    def _UpperCamelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Read the dataset; must be implemented by concrete readers."""
        pass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Abstract base for dataset readers that take no input path.

    Same options as the path-based reader above, minus path/split.
    """
    # NOTE(review): duplicate ``_A`` parameter names (SyntaxError) — see the
    # sibling class above; the intended names are features, cache_dir,
    # keep_in_memory, streaming, num_proc.
    def __init__( self , _A = None , _A = None , _A = False , _A = False , _A = None , **_A , ) -> Dict:
        SCREAMING_SNAKE_CASE_ = features
        SCREAMING_SNAKE_CASE_ = cache_dir
        SCREAMING_SNAKE_CASE_ = keep_in_memory
        SCREAMING_SNAKE_CASE_ = streaming
        SCREAMING_SNAKE_CASE_ = num_proc
        SCREAMING_SNAKE_CASE_ = kwargs
    @abstractmethod
    def _UpperCamelCase ( self ) -> Union[Dataset, IterableDataset]:
        """Read the dataset; must be implemented by concrete readers."""
        pass
| 299
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation notice at import time: the names re-exported above now
# live in ``diffusers.pipelines.pipeline_utils``; this legacy module will be
# removed in 0.22.0. stacklevel=3 points the warning at the importing caller.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 299
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Placeholder object that raises a helpful ImportError when torch/scipy
    are missing; every entry point defers to ``requires_backends``."""
    UpperCAmelCase_ =["torch", "scipy"]
    def __init__( self , *_A , **_A ) -> Tuple:
        # NOTE(review): ``*_A , **_A`` reuses one name (SyntaxError) —
        # machine-garbled signature; arguments are intentionally ignored.
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """T5-encoder-based note encoder: token + learned position embeddings, a
    stack of T5 blocks, a final layer norm and dropout."""
    # NOTE(review): all constructor parameters were machine-renamed to ``_A``
    # (duplicate names are a SyntaxError); the TaConfig call shows the intended
    # names: vocab_size, d_model, num_heads, d_kv, d_ff, dropout_rate,
    # feed_forward_proj, num_layers, max_length, is_decoder.
    @register_to_config
    def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A = False , ) -> List[str]:
        super().__init__()
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        # Position embedding is a fixed table, not trained ... presumably this
        # flag freezes it — TODO confirm (the garbled assignment just sets False).
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
        SCREAMING_SNAKE_CASE_ = TaConfig(
            vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
        SCREAMING_SNAKE_CASE_ = nn.ModuleList()
        for lyr_num in range(_A ):
            SCREAMING_SNAKE_CASE_ = TaBlock(_A )
            self.encoders.append(_A )
        SCREAMING_SNAKE_CASE_ = TaLayerNorm(_A )
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
    def _UpperCamelCase ( self , _A , _A ) -> Optional[Any]:
        """Encode input tokens; returns (hidden_states, encoder_inputs_mask)."""
        SCREAMING_SNAKE_CASE_ = self.token_embedder(_A )
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.shape[1]
        SCREAMING_SNAKE_CASE_ = torch.arange(_A , device=encoder_input_tokens.device )
        x += self.position_encoding(_A )
        SCREAMING_SNAKE_CASE_ = self.dropout_pre(_A )
        # inverted the attention mask
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.size()
        SCREAMING_SNAKE_CASE_ = self.get_extended_attention_mask(_A , _A )
        for lyr in self.encoders:
            SCREAMING_SNAKE_CASE_ = lyr(_A , _A )[0]
        SCREAMING_SNAKE_CASE_ = self.layer_norm(_A )
        return self.dropout_post(_A ), encoder_inputs_mask
| 299
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): the classes below read ``HEURISTIC``, ``grid`` and ``delta``,
# but all three constants were machine-renamed to ``__UpperCAmelCase`` —
# restore the original names before running.
__UpperCAmelCase = 0
__UpperCAmelCase = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
# Type alias for a (y, x) grid position.
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
    """A* search node: position, goal, path cost g, heuristic h and f = g + h."""
    # NOTE(review): the six constructor parameters were machine-renamed to
    # ``_A`` (duplicate names are a SyntaxError); the body reads pos_x, pos_y,
    # goal_x, goal_y, g_cost, parent.
    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> None:
        SCREAMING_SNAKE_CASE_ = pos_x
        SCREAMING_SNAKE_CASE_ = pos_y
        # Positions are stored (y, x) throughout this module.
        SCREAMING_SNAKE_CASE_ = (pos_y, pos_x)
        SCREAMING_SNAKE_CASE_ = goal_x
        SCREAMING_SNAKE_CASE_ = goal_y
        SCREAMING_SNAKE_CASE_ = g_cost
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = self.calculate_heuristic()
        SCREAMING_SNAKE_CASE_ = self.g_cost + self.h_cost
    def _UpperCamelCase ( self ) -> float:
        """Heuristic distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        SCREAMING_SNAKE_CASE_ = self.pos_x - self.goal_x
        SCREAMING_SNAKE_CASE_ = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(_A ) + abs(_A )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__( self , _A ) -> bool:
        # Ordering by f-cost lets the open list be sorted to pop the best node.
        return self.f_cost < other.f_cost
class UpperCamelCase__ :
    """A* pathfinder over the module-level ``grid`` from ``start`` to ``goal``."""
    def __init__( self , _A , _A ) -> Tuple:
        # NOTE(review): duplicate ``_A`` params (SyntaxError); intended names
        # are (start, goal), both given as (y, x) tuples.
        SCREAMING_SNAKE_CASE_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _A )
        SCREAMING_SNAKE_CASE_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , _A )
        SCREAMING_SNAKE_CASE_ = [self.start]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = False
    def _UpperCamelCase ( self ) -> list[TPosition]:
        """Run A*; return the (y, x) path to the target, or [start] if none found."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            SCREAMING_SNAKE_CASE_ = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(_A )
            self.closed_nodes.append(_A )
            SCREAMING_SNAKE_CASE_ = self.get_successors(_A )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(_A )
                else:
                    # retrieve the best current path
                    SCREAMING_SNAKE_CASE_ = self.open_nodes.pop(self.open_nodes.index(_A ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(_A )
                    else:
                        self.open_nodes.append(_A )
        return [self.start.pos]
    def _UpperCamelCase ( self , _A ) -> list[Node]:
        """Return the in-bounds, non-obstacle neighbor nodes of ``parent``."""
        SCREAMING_SNAKE_CASE_ = []
        for action in delta:
            SCREAMING_SNAKE_CASE_ = parent.pos_x + action[1]
            SCREAMING_SNAKE_CASE_ = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    _A , _A , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _A , ) )
        return successors
    def _UpperCamelCase ( self , _A ) -> list[TPosition]:
        """Walk parent links back from ``node`` and return the start-to-node path."""
        SCREAMING_SNAKE_CASE_ = node
        SCREAMING_SNAKE_CASE_ = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            SCREAMING_SNAKE_CASE_ = current_node.parent
        path.reverse()
        return path
class UpperCamelCase__ :
    """Bidirectional A*: runs a forward and a backward AStar in lockstep and
    stitches the two half-paths together when their frontiers meet."""
    def __init__( self , _A , _A ) -> None:
        # NOTE(review): duplicate ``_A`` params (SyntaxError); intended
        # (start, goal) — backward search swaps them.
        SCREAMING_SNAKE_CASE_ = AStar(_A , _A )
        SCREAMING_SNAKE_CASE_ = AStar(_A , _A )
        SCREAMING_SNAKE_CASE_ = False
    def _UpperCamelCase ( self ) -> list[TPosition]:
        """Alternate one expansion per direction until the frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            SCREAMING_SNAKE_CASE_ = self.fwd_astar.open_nodes.pop(0 )
            SCREAMING_SNAKE_CASE_ = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    _A , _A )
            self.fwd_astar.closed_nodes.append(_A )
            self.bwd_astar.closed_nodes.append(_A )
            # Each search aims at the other search's current best node.
            SCREAMING_SNAKE_CASE_ = current_bwd_node
            SCREAMING_SNAKE_CASE_ = current_fwd_node
            SCREAMING_SNAKE_CASE_ = {
                self.fwd_astar: self.fwd_astar.get_successors(_A ),
                self.bwd_astar: self.bwd_astar.get_successors(_A ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(_A )
                    else:
                        # retrieve the best current path
                        SCREAMING_SNAKE_CASE_ = astar.open_nodes.pop(
                            astar.open_nodes.index(_A ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(_A )
                        else:
                            astar.open_nodes.append(_A )
        return [self.fwd_astar.start.pos]
    def _UpperCamelCase ( self , _A , _A ) -> list[TPosition]:
        """Join the forward path with the reversed backward path (meeting node
        dropped from the backward half to avoid duplication)."""
        SCREAMING_SNAKE_CASE_ = self.fwd_astar.retrace_path(_A )
        SCREAMING_SNAKE_CASE_ = self.bwd_astar.retrace_path(_A )
        bwd_path.pop()
        bwd_path.reverse()
        SCREAMING_SNAKE_CASE_ = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # NOTE(review): every assignment below targets ``__UpperCAmelCase`` while
    # later lines read init/goal/a_star/start_time/end_time/bd_start_time/
    # bd_end_time (machine-garbled). Also, the bidirectional timing never
    # calls ``.search()`` — only construction is timed; verify upstream.
    __UpperCAmelCase = (0, 0)
    __UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    __UpperCAmelCase = time.time()
    __UpperCAmelCase = AStar(init, goal)
    __UpperCAmelCase = a_star.search()
    __UpperCAmelCase = time.time() - start_time
    print(F"""AStar execution time = {end_time:f} seconds""")
    __UpperCAmelCase = time.time()
    __UpperCAmelCase = BidirectionalAStar(init, goal)
    __UpperCAmelCase = time.time() - bd_start_time
    print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 299
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Wav2Vec2 processor: bundles a feature extractor (audio) with a CTC
    tokenizer (text) behind a single __call__/pad/decode interface."""
    UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
    UpperCAmelCase_ ="AutoTokenizer"
    def __init__( self , _A , _A ) -> Dict:
        # NOTE(review): duplicate ``_A`` params (SyntaxError); intended
        # (feature_extractor, tokenizer).
        super().__init__(_A , _A )
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
    @classmethod
    def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
        """from_pretrained with a legacy fallback: if the config lacks a
        tokenizer_class, load extractor and tokenizer separately."""
        try:
            return super().from_pretrained(_A , **_A )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , _A , )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
            SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
            return cls(feature_extractor=_A , tokenizer=_A )
    def __call__( self , *_A , **_A ) -> Any:
        """Route ``audio`` to the feature extractor and ``text`` to the
        tokenizer; when both are given, attach labels to the audio inputs."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_A , **_A )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
        # First positional argument (if any) is treated as audio.
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
            return inputs
    def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
        """Pad ``input_features`` via the extractor and/or ``labels`` via the
        tokenizer; when both are given, attach padded label ids to features."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_A , **_A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if input_features is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
        if labels is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
            return input_features
    def _UpperCamelCase ( self , *_A , **_A ) -> Any:
        """Delegate batch decoding of token ids to the tokenizer."""
        return self.tokenizer.batch_decode(*_A , **_A )
    def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*_A , **_A )
    @contextmanager
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Deprecated context manager that temporarily makes the tokenizer the
        active processor (for encoding labels)."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
| 299
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCamelCase__ :
"""simple docstring"""
    def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=2 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , _A=0 , ) -> Tuple:
        """Store the model/test hyperparameters for the TF-DPR model tester."""
        # NOTE(review): every parameter was machine-renamed to ``_A``
        # (duplicate names are a SyntaxError); the assignments below show the
        # intended names (parent, batch_size, seq_length, ...).
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_input_mask
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = projection_dim
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
SCREAMING_SNAKE_CASE_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[str]:
SCREAMING_SNAKE_CASE_ = TFDPRContextEncoder(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = TFDPRQuestionEncoder(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A , token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = TFDPRReader(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A , attention_mask=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for the TF DPR context/question encoders and reader.

    Fixes applied:
    - base classes restored to the mixins imported at the top of the file
      (they were mangled to the undefined name ``__SCREAMING_SNAKE_CASE``);
    - class attributes and method names were all mangled to one identifier
      (later definitions silently shadowed earlier ones) and are restored to
      the canonical ``TFModelTesterMixin``/``unittest`` names;
    - removed the duplicated ``TFDPRContextEncoder.from_pretrained`` loop in
      ``test_model_from_pretrained``.
    """

    # Model classes exercised by the shared mixin tests.
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    # Features not supported by the DPR TF models.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # One representative checkpoint per head; the duplicate context-encoder
        # loop present in the original has been removed.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: checks TFDPRQuestionEncoder embeddings against reference values.

    Fix: local assignment targets were machine-mangled (results were discarded
    into ``SCREAMING_SNAKE_CASE_`` while reads used the real names); the locals
    and the mangled method name are restored.
    """

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 299
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__ ( test_results ):
    """Parse a pytest summary string into (num_failed, num_passed, time_spent).

    Fix: the parameter and every local were machine-mangled (assigned to
    ``SCREAMING_SNAKE_CASE_`` while read as ``expressions``/``failed``/...),
    which made the function raise NameError; the original names are restored.

    Args:
        test_results: e.g. ``"2 failed, 3 passed in 10.5s"`` (optionally
            wrapped in ``==`` markers when short).

    Returns:
        tuple: (failed count, passed count, time-spent token as a string).
    """
    expressions = test_results.split(''' ''' )

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            # The count precedes the word, e.g. "2 failed".
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )

    return failed, success, time_spent
def A__ ( failures_short_lines ):
    """Map each failing doctest file to the first line of its error message.

    Fix: parameter and locals were machine-mangled (``SCREAMING_SNAKE_CASE_``
    targets vs. reads of ``line``/``in_error``), raising NameError; restored.
    A "_ [doctest]" header line marks the start of a failure block; its third
    token is the failing file, and the next non-line-number line is kept as
    the error summary.

    Args:
        failures_short_lines: the content of pytest's ``failures_short`` report.

    Returns:
        dict: {failing file: first error line}.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''', line ):
            in_error = True
            file = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            # First line of the error body (line-number lines are skipped).
            failures[file] = line
            in_error = False

    return failures
class UpperCamelCase__ :
    """Builds and posts the Slack report (blocks payload + threaded replies) for the daily doc-test run.

    NOTE(review): assignment targets throughout this class appear machine-mangled to
    ``SCREAMING_SNAKE_CASE_`` while reads use the intended names (``self.title``,
    ``self.n_failures``, ``time_spent`` ...), and several parameters were mangled to
    ``_A``; the original names must be restored before this class can run.
    """

    def __init__( self , _A , _A ) -> Dict:
        # NOTE(review): presumably self.title / self._time_spent / self.n_success /
        # self.n_failures / self.n_tests / self.doc_test_results — restore targets.
        SCREAMING_SNAKE_CASE_ = title
        SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
        SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
        SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
        SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE_ = doc_test_results

    @property
    def _UpperCamelCase ( self ) -> str:
        # Sums duration strings into one "XhYmZs" display value.
        SCREAMING_SNAKE_CASE_ = [self._time_spent]
        SCREAMING_SNAKE_CASE_ = 0

        for time in time_spent:
            SCREAMING_SNAKE_CASE_ = time.split(''':''' )

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_A ) == 1:
                SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]

            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds

        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Slack "header" block with the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Block used when every test passed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Block summarizing the failure/total counts and the run time.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> Dict:
        # Per-category failure listing, one backtick-quoted test name per line.
        SCREAMING_SNAKE_CASE_ = 40
        SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
        SCREAMING_SNAKE_CASE_ = ''''''
        for category, failures in category_failures.items():
            if len(_A ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_A )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }

    @property
    def _UpperCamelCase ( self ) -> str:
        # Assembles the JSON blocks payload: header, then either failures or success.
        SCREAMING_SNAKE_CASE_ = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )

        if self.n_failures > 0:
            blocks.extend([self.category_failures] )

        if self.n_failures == 0:
            blocks.append(self.no_failures )

        return json.dumps(_A )

    @staticmethod
    def _UpperCamelCase ( ) -> Any:
        # Fallback message posted when the doc-test run itself crashed.
        SCREAMING_SNAKE_CASE_ = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
                },
            }
        ]

        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_A )} ) )

        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Posts the main report message to the daily channel.
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )

        SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''

        # NOTE(review): presumably `self.thread_ts = ...` — post_reply() below
        # reads `self.thread_ts['ts']` to thread the follow-up messages.
        SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
        # Builds the per-job reply blocks: title header, job-link section, failure text.
        SCREAMING_SNAKE_CASE_ = ''''''
        for key, value in failures.items():
            SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''

        SCREAMING_SNAKE_CASE_ = job_name
        SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}

        if job_link is not None:
            SCREAMING_SNAKE_CASE_ = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def _UpperCamelCase ( self ) -> int:
        # Posts one threaded reply per failing job under the main message.
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )

        SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )

        SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                SCREAMING_SNAKE_CASE_ = job_result['''failures''']
                SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )

                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )

                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )

                time.sleep(1 )
def A__ ( ):
    """Fetch {job name: job html_url} for the current GitHub Actions run.

    Fix: locals were machine-mangled (results discarded into
    ``SCREAMING_SNAKE_CASE_`` while reads used ``url``/``result``/...),
    raising NameError; restored. Paginates the GitHub jobs API (100 per page).

    Returns:
        dict: mapping of job name to its HTML URL; {} on any error
        (best-effort: a CI notification should not crash on a flaky API).
    """
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}

    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        # Remaining pages beyond the first 100 jobs.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )

        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )

        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''', e )

    return {}
def A__ ( name ):
    """Read every file in artifact directory `name` into a dict.

    Fix: the parameter and locals were machine-mangled (the obfuscation also
    collapsed ``os.path.join(name, file)`` into a join of the parameter with
    itself and discarded the file contents into a local), making the function
    return nothing useful; restored.

    Args:
        name: path of the downloaded artifact directory (missing dir -> {}).

    Returns:
        dict: {file stem (before the first '.'): file contents as text}.

    Raises:
        ValueError: if a file in the directory is not valid UTF-8.
    """
    _artifact = {}

    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name, file ), encoding='''utf-8''' ) as f:
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(name, file )}.''' ) from e

    return _artifact
def A__ ( ):
    """Scan the current working directory for artifact sub-directories.

    Fix: the nested helper class and all locals were machine-mangled (the
    class was renamed away from ``Artifact`` while the loop still constructs
    ``Artifact(...)``, and attribute assignments were discarded), raising
    NameError; restored.

    Returns:
        dict: {directory name: Artifact} for every sub-directory of the cwd.
    """

    class Artifact:
        """One named artifact and the list of paths it was found at."""

        def __init__( self , name ):
            self.name = name
            self.paths = []

        def __str__( self ):
            return self.name

        def add_path( self , path ):
            self.paths.append({'''name''': self.name, '''path''': path} )

    _available_artifacts = {}

    # Only sub-directories of the cwd count as artifacts.
    directories = filter(os.path.isdir, os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )

        _available_artifacts[artifact_name].add_path(directory )

    return _available_artifacts
# NOTE(review): the module-level assignment targets below are machine-mangled to
# ``__UpperCAmelCase`` while reads use the intended names (``github_actions_job_links``,
# ``available_artifacts``, ``docs``, ``doc_test_results``, ``artifact``, ``message`` ...).
# Additionally, the helpers invoked here (``get_job_links``, ``retrieve_available_artifacts``,
# ``retrieve_artifact``, ``handle_test_results``, ``extract_first_line_failure``, ``Message``)
# are defined above under mangled names (``A__`` / ``UpperCamelCase__``). Restore the
# original names before running this script.
if __name__ == "__main__":
    # Collect job links and downloaded artifact directories for this CI run.
    __UpperCAmelCase = get_job_links()
    __UpperCAmelCase = retrieve_available_artifacts()

    # Doc-test categories, keyed by filename glob.
    __UpperCAmelCase = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __UpperCAmelCase = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    __UpperCAmelCase = github_actions_job_links.get("run_doctests")

    __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    __UpperCAmelCase = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        # Parse the pytest summary and the short failure report.
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
        __UpperCAmelCase = failed
        __UpperCAmelCase = success
        __UpperCAmelCase = time_spent[1:-1] + ", "

        __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                # Strip the "FAILED " prefix and isolate "path::test".
                __UpperCAmelCase = line.replace("FAILED ", "")
                __UpperCAmelCase = line.split()[0].replace("\n", "")

                if "::" in line:
                    __UpperCAmelCase , __UpperCAmelCase = line.split("::")
                else:
                    __UpperCAmelCase , __UpperCAmelCase = line, line

                # Bucket the failure into its doc-test category by filename glob.
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
                        __UpperCAmelCase = failure
                        break

    # Post the main report and the per-job threaded replies to Slack.
    __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
| 1
|
def A__ ( arr, required_sum ):
    """Return True if some subset of `arr` sums to `required_sum` (classic subset-sum DP).

    Fixes:
    - the two parameters were both mangled to ``__lowerCamelCase`` (a
      SyntaxError: duplicate argument names); restored;
    - every write into the DP table was mangled into an assignment to a
      throwaway local, so the table stayed all-False and the function always
      returned False; the ``subset[i][j]`` writes are restored.

    >>> A__([3, 34, 4, 12, 5, 2], 9)
    True
    >>> A__([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr )
    # subset[i][j] == True iff some subset of the first i elements sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1 ):
        subset[0][i] = False

    for i in range(1, arr_len + 1 ):
        for j in range(1, required_sum + 1 ):
            if arr[i - 1] > j:
                # Element too large for target j: inherit the answer without it.
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # Either skip the element or take it (reducing the target).
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 299
|
from __future__ import annotations
# Movement offsets, indexed by the action codes stored in the `action` grid
# by the search routine below.
# FIX(review): renamed from the mangled ``__UpperCAmelCase``; the search
# function reads this table under the name ``DIRECTIONS``.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def A__ ( grid, init, goal, cost, heuristic ):
    """A*-style grid search: returns (path from init to goal, action grid).

    Fixes:
    - all five parameters were mangled to the same name (SyntaxError) and every
      local write was discarded into a throwaway variable while reads used the
      intended names (``cell``, ``found``, ``closed[x][y]`` ...), so the function
      could not run at all; the original names and the writes into the
      ``closed``/``action`` grids are restored.

    Args:
        grid: 2D list, 0 = free cell, 1 = obstacle.
        init: [row, col] start cell.
        goal: [row, col] destination cell.
        cost: uniform per-step cost.
        heuristic: 2D list of per-cell heuristic values, same shape as grid.

    Returns:
        (path, action): path is the list of [row, col] cells from init to goal;
        action[r][c] is the index into the module-level DIRECTIONS table of the
        move used to enter (r, c).

    Raises:
        ValueError: when no path to the goal exists.
    """
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk backwards from the goal along the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )

    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    # FIX(review): the module-level names below were all mangled to
    # ``__UpperCAmelCase`` while subsequent reads used the real names
    # (``grid``, ``goal``, ``heuristic`` ...); restored. The call to
    # ``search`` still refers to the routine defined above (mangled to
    # ``A__`` in this file) — rename that definition accordingly.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 299
| 1
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """T5-style FiLM-conditioned decoder stack (spectrogram decoder).

    NOTE(review): the base classes (presumably ModelMixin, ConfigMixin from the
    imports above) and every attribute/local assignment target in this class
    appear machine-mangled (``__SCREAMING_SNAKE_CASE`` / ``SCREAMING_SNAKE_CASE_``)
    while reads use the intended names (``self.conditioning_emb``,
    ``self.decoders`` ...); restore them before use.
    """

    @register_to_config
    def __init__( self , _A = 128 , _A = 256 , _A = 2000.0 , _A = 768 , _A = 12 , _A = 12 , _A = 64 , _A = 2048 , _A = 0.1 , ) -> Any:
        super().__init__()
        # NOTE(review): presumably self.conditioning_emb (SiLU MLP over the
        # diffusion-time embedding), self.position_encoding, the
        # continuous-inputs projection, dropout, decoder list, final norm,
        # post-dropout and spec_out projection — restore the targets.
        SCREAMING_SNAKE_CASE_ = nn.Sequential(
            nn.Linear(_A , d_model * 4 , bias=_A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_A ) , nn.SiLU() , )

        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = False

        SCREAMING_SNAKE_CASE_ = nn.Linear(_A , _A , bias=_A )

        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )

        SCREAMING_SNAKE_CASE_ = nn.ModuleList()
        for lyr_num in range(_A ):
            # FiLM conditional T5 decoder
            SCREAMING_SNAKE_CASE_ = DecoderLayer(d_model=_A , d_kv=_A , num_heads=_A , d_ff=_A , dropout_rate=_A )
            self.decoders.append(_A )

        SCREAMING_SNAKE_CASE_ = TaLayerNorm(_A )

        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )

        SCREAMING_SNAKE_CASE_ = nn.Linear(_A , _A , bias=_A )

    def _UpperCamelCase ( self , _A , _A ) -> Union[str, Any]:
        # Builds a (batch, 1, q_len, kv_len) cross-attention mask from two
        # per-token masks via an outer product.
        SCREAMING_SNAKE_CASE_ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def _UpperCamelCase ( self , _A , _A , _A ) -> Any:
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        SCREAMING_SNAKE_CASE_ = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )

        SCREAMING_SNAKE_CASE_ = self.conditioning_emb(_A ).unsqueeze(1 )

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        SCREAMING_SNAKE_CASE_ = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        SCREAMING_SNAKE_CASE_ = torch.broadcast_to(
            torch.arange(_A , device=decoder_input_tokens.device ) , (batch, seq_length) , )

        SCREAMING_SNAKE_CASE_ = self.position_encoding(_A )

        SCREAMING_SNAKE_CASE_ = self.continuous_inputs_projection(_A )
        inputs += position_encodings
        SCREAMING_SNAKE_CASE_ = self.dropout(_A )

        # decoder: No padding present.
        SCREAMING_SNAKE_CASE_ = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        SCREAMING_SNAKE_CASE_ = [(x, self.encoder_decoder_mask(_A , _A )) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        SCREAMING_SNAKE_CASE_ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        SCREAMING_SNAKE_CASE_ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )

        for lyr in self.decoders:
            SCREAMING_SNAKE_CASE_ = lyr(
                _A , conditioning_emb=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )[0]

        SCREAMING_SNAKE_CASE_ = self.decoder_norm(_A )
        SCREAMING_SNAKE_CASE_ = self.post_dropout(_A )

        SCREAMING_SNAKE_CASE_ = self.spec_out(_A )
        return spec_out
class UpperCamelCase__ ( nn.Module ):
    """T5 decoder layer: FiLM-conditioned self-attention, cross-attention, FiLM feed-forward.

    Fix: attribute writes were machine-mangled (discarded into a local while
    reads used ``self.layer``), and the forward method was renamed away from
    ``forward`` so ``nn.Module.__call__`` could not dispatch to it; restored.
    """

    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        """Run self-attn, optional cross-attn, then the FiLM feed-forward; returns (hidden_states,)."""
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )

        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into an additive mask (-1e10 on padding).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )

            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )

        return (hidden_states,)
class UpperCamelCase__ ( nn.Module ):
    """Pre-norm self-attention block with optional FiLM conditioning and residual dropout.

    Fix: attribute writes were mangled into throwaway locals while reads used
    ``self.layer_norm`` / ``self.FiLMLayer`` / ``self.attention`` /
    ``self.dropout``, and the forward method name prevented module dispatch;
    restored. (``out_bias``/``scale_qk`` values follow the diffusers
    T5-film reference — confirm against the upstream module.)
    """

    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )

        # Self-attention block
        attention_output = self.attention(normed_hidden_states )

        hidden_states = hidden_states + self.dropout(attention_output )

        return hidden_states
class UpperCamelCase__ ( nn.Module ):
    """Pre-norm cross-attention block with residual dropout.

    Fix: attribute writes were mangled into throwaway locals while reads used
    ``self.attention`` / ``self.layer_norm`` / ``self.dropout``, and the
    forward method name prevented ``nn.Module`` dispatch; restored.
    """

    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class UpperCamelCase__ ( nn.Module ):
    """Pre-norm gated feed-forward block with optional FiLM conditioning and residual dropout.

    Fix: attribute writes were mangled into throwaway locals while reads used
    ``self.DenseReluDense`` / ``self.film`` / ``self.layer_norm`` /
    ``self.dropout``, and the forward method name prevented module dispatch;
    restored.
    """

    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )

        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class UpperCamelCase__ ( nn.Module ):
    """T5 gated-GELU feed-forward: gelu(wi_0(x)) * wi_1(x) -> dropout -> wo.

    Fix: the three linear layers, dropout and activation were all assigned to a
    throwaway local, and the obfuscation collapsed the two input projections
    into a single ``wi_a`` read; the canonical ``wi_0``/``wi_1``/``wo``/
    ``dropout``/``act`` attributes and the ``forward`` method name are restored.
    """

    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward( self , hidden_states ):
        # Gated activation: GELU branch multiplied by a linear branch.
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )

        hidden_states = self.wo(hidden_states )
        return hidden_states
class UpperCamelCase__ ( nn.Module ):
    """T5-style RMS layer norm: scale-only, no mean subtraction, no bias.

    Fixes: the ``weight``/``variance_epsilon`` attributes were assigned to a
    throwaway local; ``torch.floataa``/``torch.bfloataa`` were mangled dtype
    names; ``keepdim`` received the input tensor instead of ``True``; and the
    method was not named ``forward`` so ``nn.Module.__call__`` could not
    dispatch to it. All restored.
    """

    def __init__( self , hidden_size , eps=1E-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward( self , hidden_states ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )

        return self.weight * hidden_states
class UpperCamelCase__ ( nn.Module ):
    """Gaussian Error Linear Unit, tanh approximation (as used in Google BERT / GPT).

    Fix: the method was not named ``forward``, so calling the module (as the
    feed-forward block above does via ``self.act(...)``) could not dispatch
    to it; renamed to ``forward``.
    """

    def forward( self , input: torch.Tensor ) -> torch.Tensor:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(input , 3.0 )) ))
class UpperCamelCase__ ( nn.Module ):
    """FiLM layer: computes (scale, shift) from a conditioning embedding and applies
    ``x * (1 + scale) + shift``.

    Fix: the ``scale_bias`` linear layer and the locals in forward were
    assigned to throwaway names while reads used the intended ones, and the
    method was not named ``forward``; restored.
    """

    def __init__( self , in_features , out_features ):
        super().__init__()
        # One projection producing both scale and shift (hence out_features * 2).
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
| 299
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def A__ ( matrix, vector ):
    """Solve the linear system matrix @ x = vector via Gaussian elimination with partial pivoting.

    Fixes: the two parameters were mangled to the same name (SyntaxError),
    every write into the augmented matrix was discarded into a throwaway
    local (so elimination never happened), and stray ``= 42`` lines replaced
    the original local declarations; all restored.

    Args:
        matrix: square coefficient matrix as a list of rows.
        vector: column vector as a list of single-element rows.

    Returns:
        The solution as a list of single-element rows, each value rounded to
        10 decimal places.
    """
    size = len(matrix )
    # Augmented matrix [A | b].
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column.
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(col, size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10 )] for row in range(size )
    ]
def A__ ( y_points ):
    """Return the polynomial interpolant through points (1, y_points[0]), (2, y_points[1]), ...

    Fix: the parameter and locals were machine-mangled (assigned to throwaway
    names while read as ``matrix``/``vector``/``coeffs``/``size``), raising
    NameError; restored. Uses the ``solve`` Gaussian-elimination helper
    defined above (restore its mangled name accordingly).

    Args:
        y_points: y-values at x = 1, 2, ..., len(y_points).

    Returns:
        A function mapping an integer x to the (rounded-coefficient)
        interpolated polynomial value.
    """
    size = len(y_points )
    # Vandermonde-style system: matrix[x][c] = (x+1)^(size-c-1).
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]

    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector )

    def interpolated_func(var: int ) -> int:
        # Coefficients are rounded so integer inputs yield exact integer outputs.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func
def A__ ( variable ):
    """The generating polynomial u(n) = 1 - n + n^2 - ... + n^10 (Project Euler 101).

    Fix: the parameter was mangled to ``__lowerCamelCase`` while the body read
    ``variable``, raising NameError on every call; the parameter name is restored.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def A__ ( func = question_function, order = 10 ):
    """Sum the first incorrect terms (FITs) of the optimum polynomials fitted
    to the first 1..order values of ``func``.

    Fixes: both parameters were previously named ``__lowerCamelCase`` — a
    SyntaxError (duplicate argument) — and the comprehension/while loop passed
    the function object itself instead of ``x_val``; distinct names restore
    the intended behaviour.

    NOTE(review): the body calls module-level ``interpolate`` and defaults to
    ``question_function``; in this file those helpers are currently also named
    ``A__`` — confirm the sibling definitions are restored to these names.
    """
    # Ground-truth sequence values for x = 1..order.
    data_points = [func(x_val ) for x_val in range(1, order + 1 )]
    # Optimum polynomial of each prefix length 1..order.
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fit first disagrees with func: that is the FIT.
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module under that name
    # (the entry point above is named ``A__``) — confirm the intended name.
    print(F"""{solution() = }""")
| 299
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import table: submodule file -> public names it exports.
# Fix: the table was previously assigned to a throwaway name while
# ``_import_structure`` (referenced by _LazyModule below) was never defined,
# raising NameError on import.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not expose the modeling objects.
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """PyTorch-Lightning module that fine-tunes a seq2seq model for summarization.

    NOTE(review): many signatures below repeat a parameter name (e.g.
    ``def __init__(self, _A, **_A)`` or ``*_A, **_A``), which is a SyntaxError
    in Python — the original parameter names appear to have been mangled and
    should be restored. The bodies also reference names (``hparams``,
    ``batch``, ``outputs``, ...) that those parameters originally carried.
    """
    # task name / names of reported losses / validation metric names / default val metric
    UpperCAmelCase_ ="summarization"
    UpperCAmelCase_ =["loss"]
    UpperCAmelCase_ =ROUGE_KEYS
    UpperCAmelCase_ ="rouge2"
    def __init__( self , _A , **_A ) -> Tuple:
        # Sortish sampling is disabled on multi-GPU; dynamic batching is
        # single-GPU only and mutually exclusive with sortish sampling.
        if hparams.sortish_sampler and hparams.gpus > 1:
            SCREAMING_SNAKE_CASE_ = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
        use_task_specific_params(self.model , '''summarization''' )
        # Persist run metadata next to the outputs for reproducibility.
        save_git_info(self.hparams.output_dir )
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = defaultdict(_A )
        SCREAMING_SNAKE_CASE_ = self.config.model_type
        # fsmt keeps separate src/tgt vocabularies; others have a single one.
        SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        SCREAMING_SNAKE_CASE_ = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Per-split example caps; negative values mean "use the full split".
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
        SCREAMING_SNAKE_CASE_ = hparams.num_workers
        SCREAMING_SNAKE_CASE_ = None  # default to config
        # MBart needs the target-language id as the decoder start token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
            SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = (
            SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
        else:
            SCREAMING_SNAKE_CASE_ = self.model.config.max_length
        SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
        # Dump one decoded batch to JSON so a human can sanity-check the data.
        SCREAMING_SNAKE_CASE_ = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
        SCREAMING_SNAKE_CASE_ = True
        return readable_batch
    def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
        # Thin forward wrapper around the underlying transformer.
        return self.model(_A , **_A )
    def _UpperCamelCase ( self , _A ) -> List[Any]:
        # Decode generated ids to whitespace-stripped strings.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
            _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
        return lmap(str.strip , _A )
    def _UpperCamelCase ( self , _A ) -> Tuple:
        # One training forward pass returning a 1-tuple of the loss.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
        SCREAMING_SNAKE_CASE_ = batch['''labels''']
        # T5 shifts labels internally via _shift_right; BART-style models use
        # the shared shift_tokens_right helper.
        if isinstance(self.model , _A ):
            SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
        else:
            SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            SCREAMING_SNAKE_CASE_ = decoder_input_ids
            self.save_readable_batch(_A )
        SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
        SCREAMING_SNAKE_CASE_ = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
            assert lm_logits.shape[-1] == self.vocab_size
            SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
                _A , _A , self.hparams.label_smoothing , ignore_index=_A )
        return (loss,)
    @property
    def _UpperCamelCase ( self ) -> int:
        # Pad token id shortcut used by the logging code below.
        return self.tokenizer.pad_token_id
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        # Lightning training_step: compute loss plus batch statistics for logs.
        SCREAMING_SNAKE_CASE_ = self._step(_A )
        SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
        # tokens per batch
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
        SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        # Lightning validation_step delegates to the generation-based step.
        return self._generative_step(_A )
    def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
        # Aggregate per-step outputs into epoch-level losses and gen metrics.
        self.step_count += 1
        SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        SCREAMING_SNAKE_CASE_ = losses['''loss''']
        SCREAMING_SNAKE_CASE_ = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        SCREAMING_SNAKE_CASE_ = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(_A )
        SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
        SCREAMING_SNAKE_CASE_ = self.step_count
        self.metrics[prefix].append(_A )  # callback writes this to self.metrics_save_path
        SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F'''{prefix}_loss''': loss,
            F'''{prefix}_{self.val_metric}''': metric_tensor,
        }
    def _UpperCamelCase ( self , _A , _A ) -> Dict:
        # Summarization quality metric: ROUGE between predictions and targets.
        return calculate_rouge(_A , _A )
    def _UpperCamelCase ( self , _A ) -> dict:
        # Validation/test step: generate, time the generation, score it, and
        # also compute the teacher-forced loss for the same batch.
        SCREAMING_SNAKE_CASE_ = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        SCREAMING_SNAKE_CASE_ = self.model.generate(
            batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
        SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
        SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
        SCREAMING_SNAKE_CASE_ = self._step(_A )
        SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
        SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
        SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
        base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
        return base_metrics
    def _UpperCamelCase ( self , _A , _A ) -> Any:
        # Lightning test_step mirrors validation_step.
        return self._generative_step(_A )
    def _UpperCamelCase ( self , _A ) -> Optional[int]:
        # test_epoch_end reuses the validation aggregation with a 'test' prefix.
        return self.validation_epoch_end(_A , prefix='''test''' )
    def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
        # Build the dataset for one split, honoring per-split caps and lengths.
        SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
        SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
        SCREAMING_SNAKE_CASE_ = self.dataset_class(
            self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
        return dataset
    def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
        # Choose among sortish sampling, dynamic (token-budget) batching, or a
        # plain DataLoader; the first two apply to the training split only.
        SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                _A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
    def _UpperCamelCase ( self ) -> DataLoader:
        # Training split loader (shuffled unless a sampler takes over).
        SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
        return dataloader
    def _UpperCamelCase ( self ) -> DataLoader:
        # Validation split loader.
        return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
    def _UpperCamelCase ( self ) -> DataLoader:
        # Test split loader.
        return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def _UpperCamelCase ( _A , _A ) -> Dict:
        # Register all CLI flags this module consumes on top of the generic
        # BaseTransformer arguments.
        BaseTransformer.add_model_specific_args(_A , _A )
        add_generic_args(_A , _A )
        parser.add_argument(
            '''--max_source_length''' , default=1024 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--max_target_length''' , default=56 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=142 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=142 , type=_A , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
        parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
        parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
        parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
        parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
        parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
        parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
        parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
        parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
        parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
        parser.add_argument(
            '''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
        parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
        parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
        parser.add_argument(
            '''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will effect it.'''
            ) , )
        return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Translation variant of the summarization module: same training loop,
    but scores generations with BLEU instead of ROUGE.

    NOTE(review): ``__init__`` repeats the parameter name ``_A`` for both the
    positional argument and ``**kwargs`` — a SyntaxError; restore the original
    names (``hparams``, ``**kwargs``).
    """
    # task name / reported losses / validation metrics / default val metric
    UpperCAmelCase_ ="translation"
    UpperCAmelCase_ =["loss"]
    UpperCAmelCase_ =["bleu"]
    UpperCAmelCase_ ="bleu"
    def __init__( self , _A , **_A ) -> Optional[int]:
        super().__init__(_A , **_A )
        # Source/target language codes used by multilingual tokenizers.
        SCREAMING_SNAKE_CASE_ = hparams.src_lang
        SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
    def _UpperCamelCase ( self , _A , _A ) -> dict:
        # Translation quality metric: BLEU between predictions and targets.
        return calculate_bleu(_A , _A )
def A__ ( args, model=None ):
    """Train (and optionally test) a summarization/translation module.

    Fix: both parameters were previously named ``__lowerCamelCase`` — a
    SyntaxError (duplicate argument) — while the body referred to ``args`` and
    ``model``; the parameter names are restored to match the body and the
    ``main(args)`` call at the bottom of the file.

    Returns the trained module so callers (e.g. distillation scripts) can
    reuse it.
    """
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args, expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('''/tmp''' )
        or str(args.output_dir ).startswith('''/var''' )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('''WANDB_PROJECT''', dataset )
        logger = WandbLogger(name=model.output_dir.name, project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better ), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
    if not args.do_predict:
        return model
    # Pick up the newest checkpoint (lexicographically last) for testing.
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        # NOTE(review): reconstructed — the previous revision assigned the
        # same throwaway name twice here; confirm resume_from_checkpoint.
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Build the CLI: generic PL Trainer flags + module-specific flags, then run.
    # NOTE(review): every result below is bound to the same throwaway name
    # while ``parser``/``args``/``main`` are referenced — the original names
    # appear to have been mangled and should be restored.
    __UpperCAmelCase = argparse.ArgumentParser()
    __UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
    __UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    __UpperCAmelCase = parser.parse_args()
    main(args)
| 299
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Processor bundling a Wav2Vec2 feature extractor (audio) and a CTC
    tokenizer (text) behind a single ``__call__``/``pad`` interface.

    NOTE(review): several signatures below use ``*_A , **_A`` — a duplicate
    argument name and therefore a SyntaxError; the original ``*args, **kwargs``
    names appear to have been mangled and should be restored.
    """
    # Component class names resolved by ProcessorMixin.
    UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
    UpperCAmelCase_ ="AutoTokenizer"
    def __init__( self , _A , _A ) -> Dict:
        super().__init__(_A , _A )
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
    @classmethod
    def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
        # Backward-compat loader: configs lacking `tokenizer_class` fall back
        # to the legacy explicit component classes (with a deprecation warning).
        try:
            return super().from_pretrained(_A , **_A )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , _A , )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
            SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
            return cls(feature_extractor=_A , tokenizer=_A )
    def __call__( self , *_A , **_A ) -> Any:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_A , **_A )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
        # First positional argument, if any, is treated as the audio input.
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
        # Merge: audio-only returns features, text-only returns encodings,
        # both returns features with `labels` attached.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
            return inputs
    def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_A , **_A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if input_features is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
        if labels is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
        # Same merge rules as __call__, but for padding.
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
            return input_features
    def _UpperCamelCase ( self , *_A , **_A ) -> Any:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*_A , **_A )
    def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*_A , **_A )
    @contextmanager
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Deprecated context manager that temporarily swaps the active
        # processor to the tokenizer for label processing.
        # NOTE(review): the restore after `yield` is not in a try/finally, so
        # an exception inside the `with` block leaves the processor swapped.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
| 299
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import table: submodule file -> public names it exports.
# Fix: the table was previously assigned to a throwaway name while
# ``_import_structure`` (referenced by _LazyModule below) was never defined,
# raising NameError on import; the optional entries also overwrote the table
# instead of extending it.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports (module names as present in
    # this repository).
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 1
|
import heapq as hq
import math
from collections.abc import Iterator
class UpperCamelCase__ :
    """A graph vertex for the Prim minimum-spanning-tree helpers below.

    Fixes vs. the previous revision: ``__init__`` referenced an undefined
    ``id_`` and never stored any attribute; ``__lt__`` referenced an undefined
    ``other``; the two mutators shared one name with a duplicated parameter
    (a SyntaxError). Names are restored to what the sibling functions call
    (``add_neighbor`` / ``add_edge``).
    """

    def __init__( self , _A ) -> None:
        self.id = str(_A )  # vertex label, stored as a string
        self.key = None  # cheapest known edge weight connecting to the tree
        self.pi = None  # predecessor vertex in the MST
        self.neighbors = []  # adjacent vertices
        self.edges = {}  # {neighbor id: edge weight}

    def __lt__( self , other ) -> bool:
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__( self ) -> str:
        return self.id

    def add_neighbor( self , vertex ) -> None:
        """Record ``vertex`` as adjacent to this one."""
        self.neighbors.append(vertex )

    def add_edge( self , vertex , weight ) -> None:
        """Record the weight of the edge to ``vertex``."""
        self.edges[vertex.id] = weight
def A__ ( graph , a , b , edge ):
    """Connect 1-indexed vertices ``a`` and ``b`` of ``graph`` with an
    undirected edge of weight ``edge``.

    Fix: all four parameters were previously named ``__lowerCamelCase`` — a
    SyntaxError (duplicate argument); distinct names restore the function.
    """
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge )
    graph[b - 1].add_edge(graph[a - 1], edge )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = []
for u in graph:
SCREAMING_SNAKE_CASE_ = math.inf
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = graph[:]
while q:
SCREAMING_SNAKE_CASE_ = min(__lowerCamelCase )
q.remove(__lowerCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE_ = u
SCREAMING_SNAKE_CASE_ = u.edges[v.id]
for i in range(1, len(__lowerCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def A__ ( __lowerCamelCase, __lowerCamelCase ):
for u in graph:
SCREAMING_SNAKE_CASE_ = math.inf
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = list(__lowerCamelCase )
hq.heapify(__lowerCamelCase )
while h:
SCREAMING_SNAKE_CASE_ = hq.heappop(__lowerCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE_ = u
SCREAMING_SNAKE_CASE_ = u.edges[v.id]
hq.heapify(__lowerCamelCase )
for i in range(1, len(__lowerCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def A__ ( ):
    # Intentionally empty (the doctest runner below still imports the module).
    pass
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
| 299
|
import functools
def A__ ( days , costs ):
    """Minimum total cost to travel on every day listed in ``days`` (1..365),
    given ``costs`` = [1-day pass, 7-day pass, 30-day pass].

    Fix: both parameters were previously named ``__lowerCamelCase`` — a
    SyntaxError (duplicate argument) — which also conflated the two inputs in
    the validation below; distinct names restore the function.

    Raises ValueError on malformed ``days``/``costs``.
    """
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Cost of covering all travel days >= index.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        # Travel day: buy the cheapest of the three passes starting today.
        return min(
            costs[0] + dynamic_programming(index + 1 ), costs[1] + dynamic_programming(index + 7 ), costs[2] + dynamic_programming(index + 30 ), )

    return dynamic_programming(1 )
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
| 299
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Processor bundling a BLIP image processor and a BERT tokenizer behind a
    single ``__call__`` that returns image features, text encodings, or both.

    NOTE(review): ``batch_decode``/``decode`` below use ``*_A , **_A`` — a
    duplicate argument name and therefore a SyntaxError; restore the original
    ``*args, **kwargs`` names.
    """
    # Component attributes / class names resolved by ProcessorMixin.
    UpperCAmelCase_ =["image_processor", "tokenizer"]
    UpperCAmelCase_ ="BlipImageProcessor"
    UpperCAmelCase_ =("BertTokenizer", "BertTokenizerFast")
    def __init__( self , _A , _A ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = False
        super().__init__(_A , _A )
        SCREAMING_SNAKE_CASE_ = self.image_processor
    def __call__( self , _A = None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 0 , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer
            SCREAMING_SNAKE_CASE_ = self.tokenizer(
                text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
            return text_encoding
        # add pixel_values
        SCREAMING_SNAKE_CASE_ = self.image_processor(_A , return_tensors=_A )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(
                text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
        else:
            SCREAMING_SNAKE_CASE_ = None
        # Merge the text encoding into the image features when both are given.
        if text_encoding is not None:
            encoding_image_processor.update(_A )
        return encoding_image_processor
    def _UpperCamelCase ( self , *_A , **_A ) -> List[str]:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*_A , **_A )
    def _UpperCamelCase ( self , *_A , **_A ) -> Tuple:
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*_A , **_A )
    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Union of the input names expected by both components, order-preserving.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 299
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test harness for a small 3-channel 32x32 UNet2DModel with one
    attention down/up block; fixtures only, the actual checks live in the
    shared ModelTesterMixin/UNetTesterMixin bases.
    """
    UpperCAmelCase_ =UNetaDModel
    UpperCAmelCase_ ="sample"
    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Random (batch=4, channels=3, 32, 32) noise plus a timestep tensor.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}
    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Minimal model config + matching dummy inputs for the common tests.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for the LDM flavour of UNet2DModel (4-channel, 32x32 latents).

    NOTE(review): the automated renaming left duplicate assignment targets and
    dangling uses (`batch_size`, `noise`, `model`, `loading_info`, `_A`, ...);
    comments below describe the intended upstream behaviour.
    """

    UpperCAmelCase_ =UNetaDModel  # model class under test
    UpperCAmelCase_ ="sample"  # main input name

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Dummy (noise, timestep) inputs for a 4-channel 32x32 model."""
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        """Expected input shape (channels, height, width)."""
        return (4, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        """Expected output shape (channels, height, width)."""
        return (4, 32, 32)

    def _UpperCamelCase ( self ) -> Tuple:
        """Return (config kwargs, dummy inputs) for the LDM UNet."""
        SCREAMING_SNAKE_CASE_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    def _UpperCamelCase ( self ) -> Tuple:
        """from_pretrained should load the dummy checkpoint with no missing keys."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        """Same forward pass, on GPU only."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        """Accelerate (low_cpu_mem_usage) loading must match normal loading."""
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
        assert torch_all_close(_A , _A , rtol=1E-3 )

    def _UpperCamelCase ( self ) -> List[str]:
        """Output of the pretrained dummy model must match a recorded slice."""
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for the score-based (NCSN++) variant of UNet2DModel.

    NOTE(review): automated renaming left dangling names throughout
    (`batch_size`, `sizes`, `noise`, `model`, `_A`, ...); comments document the
    intended upstream behaviour.
    """

    UpperCAmelCase_ =UNetaDModel  # model class under test
    UpperCAmelCase_ ="sample"  # main input name

    @property
    def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
        """Dummy (noise, timestep) inputs; spatial size is parameterizable."""
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        """Expected input shape (channels, height, width)."""
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        """Expected output shape (channels, height, width)."""
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Return (config kwargs, dummy inputs) for the NCSN++ UNet."""
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        """Loading the 256px CelebA-HQ checkpoint must report no missing keys."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
        SCREAMING_SNAKE_CASE_ = noise
        SCREAMING_SNAKE_CASE_ = model(**_A )
        assert image is not None, "Make sure output is not None"

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        """256px CelebA-HQ checkpoint output must match a recorded slice."""
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (256, 256)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> int:
        """Dummy FFHQ VE checkpoint output must match a recorded slice."""
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Dict:
        """Intentionally skipped for this model variant."""
        # not required for this model
        pass
| 299
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
    """Helper that holds DETA image-processor settings and expected output sizes.

    NOTE(review): the renaming pass reused `_A` for every ``__init__`` parameter
    (a SyntaxError as written) and left dangling locals (`size`, `parent`, ...);
    the intended parameters are (parent, batch_size, num_channels,
    min_resolution, max_resolution, do_resize, size, do_normalize, image_mean,
    image_std, do_rescale, rescale_factor, do_pad) — confirm upstream.
    """

    def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ) -> Dict:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = min_resolution
        SCREAMING_SNAKE_CASE_ = max_resolution
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean
        SCREAMING_SNAKE_CASE_ = image_std
        SCREAMING_SNAKE_CASE_ = do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_factor
        SCREAMING_SNAKE_CASE_ = do_pad

    def _UpperCamelCase ( self ) -> str:
        """Return the processor settings as a kwargs dict."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def _UpperCamelCase ( self , _A , _A=False ) -> List[str]:
        """Compute the post-resize (height, width) the processor should produce.

        Keeps the aspect ratio pinned to ``shortest_edge``; for batched inputs
        returns the per-batch maxima (padding target).
        """
        if not batched:
            SCREAMING_SNAKE_CASE_ = image_inputs[0]
            if isinstance(_A , Image.Image ):
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.size
            else:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.shape[1], image.shape[2]
            if w < h:
                SCREAMING_SNAKE_CASE_ = int(self.size['''shortest_edge'''] * h / w )
                SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
            elif w > h:
                SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
                SCREAMING_SNAKE_CASE_ = int(self.size['''shortest_edge'''] * w / h )
            else:
                SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
                SCREAMING_SNAKE_CASE_ = self.size['''shortest_edge''']
        else:
            SCREAMING_SNAKE_CASE_ = []
            for image in image_inputs:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            SCREAMING_SNAKE_CASE_ = max(_A , key=lambda _A : item[0] )[0]
            SCREAMING_SNAKE_CASE_ = max(_A , key=lambda _A : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit + slow integration tests for DetaImageProcessor.

    NOTE(review): automated renaming left dangling uses (`image_inputs`,
    `encoded_images`, `encoding`, `_A`, ...) and duplicate assignment targets;
    comments describe the intended upstream behaviour.
    """

    UpperCAmelCase_ =DetaImageProcessor if is_vision_available() else None  # class under test

    def _UpperCamelCase ( self ) -> List[str]:
        """Create the shared settings helper."""
        SCREAMING_SNAKE_CASE_ = DetaImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        """Processor kwargs produced by the settings helper."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ) -> Any:
        """The processor must expose all expected configuration attributes."""
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''image_mean''' ) )
        self.assertTrue(hasattr(_A , '''image_std''' ) )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''do_resize''' ) )
        self.assertTrue(hasattr(_A , '''do_rescale''' ) )
        self.assertTrue(hasattr(_A , '''do_pad''' ) )
        self.assertTrue(hasattr(_A , '''size''' ) )

    def _UpperCamelCase ( self ) -> Dict:
        """from_dict round-trip keeps default size and padding behaviour."""
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , _A )

    def _UpperCamelCase ( self ) -> Dict:
        """Intentionally skipped."""
        pass

    def _UpperCamelCase ( self ) -> List[Any]:
        """PIL inputs: single and batched calls produce the expected shapes."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _UpperCamelCase ( self ) -> List[str]:
        """NumPy inputs: single and batched calls produce the expected shapes."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Torch inputs: single and batched calls produce the expected shapes."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_A , return_tensors='''pt''' ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.image_processor_tester.get_expected_values(_A , batched=_A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def _UpperCamelCase ( self ) -> int:
        """COCO detection annotations are converted to the expected targets."""
        # prepare image and target
        SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
        SCREAMING_SNAKE_CASE_ = {'''image_id''': 39769, '''annotations''': target}
        # encode them
        SCREAMING_SNAKE_CASE_ = DetaImageProcessor()
        SCREAMING_SNAKE_CASE_ = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
        # verify pixel values
        SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , _A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
        # verify area
        SCREAMING_SNAKE_CASE_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
        # verify boxes
        SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
        # verify image_id
        SCREAMING_SNAKE_CASE_ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
        # verify is_crowd
        SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
        # verify class_labels
        SCREAMING_SNAKE_CASE_ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
        # verify orig_size
        SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
        # verify size
        SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )

    @slow
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """COCO panoptic annotations (with masks) are converted correctly."""
        # prepare image, target and masks_path
        SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            SCREAMING_SNAKE_CASE_ = json.loads(f.read() )
        SCREAMING_SNAKE_CASE_ = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
        SCREAMING_SNAKE_CASE_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        SCREAMING_SNAKE_CASE_ = DetaImageProcessor(format='''coco_panoptic''' )
        SCREAMING_SNAKE_CASE_ = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
        # verify pixel values
        SCREAMING_SNAKE_CASE_ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , _A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1E-4 ) )
        # verify area
        SCREAMING_SNAKE_CASE_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
        # verify boxes
        SCREAMING_SNAKE_CASE_ = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1E-3 ) )
        # verify image_id
        SCREAMING_SNAKE_CASE_ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
        # verify is_crowd
        SCREAMING_SNAKE_CASE_ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
        # verify class_labels
        SCREAMING_SNAKE_CASE_ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
        # verify masks
        SCREAMING_SNAKE_CASE_ = 822873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
        # verify orig_size
        SCREAMING_SNAKE_CASE_ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
        # verify size
        SCREAMING_SNAKE_CASE_ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
| 299
|
def A__ ( __lowerCamelCase = 10_00 ):
    """Return the sum of the decimal digits of ``2 ** power`` (Project Euler 16).

    Args:
        __lowerCamelCase: the exponent ``power`` (default 1000).

    Returns:
        int: digit sum of 2**power.

    Bug fixes vs. the previous revision: the body referenced the undefined
    name ``power`` (NameError on every call), and the ``__main__`` guard called
    an undefined ``solution``.
    """
    # str/sum is the idiomatic digit sum; equivalent to the manual divmod loop.
    return sum(int(digit) for digit in str(2**__lowerCamelCase))


if __name__ == "__main__":
    print(A__(int(str(input()).strip())))
| 299
| 1
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
# Emit a deprecation notice at import time: this shim module only re-exports
# StableDiffusionImg2ImgPipeline from `diffusers` for backwards compatibility.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 299
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["torch", "scipy"]
def __init__( self , *_A , **_A ) -> Tuple:
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 1
|
def A__ ( __lowerCamelCase ):
    """Sort a mutable sequence of comparable items in place (selection sort).

    Args:
        __lowerCamelCase: a mutable sequence (e.g. list) of comparable items.

    Returns:
        The same sequence, sorted ascending.

    Bug fixes vs. the previous revision:
      * the inner scan's upper bound was the *sequence itself* instead of its
        length (TypeError);
      * the "swap" unpacked into two throwaway locals and never exchanged the
        elements, so the input came back unsorted.
    """
    collection = __lowerCamelCase  # local alias; parameter name kept for callers
    length = len(collection)
    for i in range(length - 1):
        least = i  # index of the smallest element seen so far in the tail
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = collection[least], collection[i]
    return collection


if __name__ == "__main__":
    # Fixed the dangling names left by the renaming pass (the guard referenced
    # undefined `user_input`, `unsorted` and `selection_sort`).
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(A__(unsorted))
| 299
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
    """Helper that builds RegNet configs/inputs and checks model outputs (Flax).

    NOTE(review): the renaming pass left dangling names (`parent`,
    `batch_size`, `config`, `pixel_values`, ...) and duplicate assignment
    targets; comments describe the intended upstream behaviour.
    """

    def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
        # NOTE(review): `_A` is reused for every parameter (SyntaxError as
        # written) — intended names are (parent, batch_size, image_size, ...).
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = embeddings_size
        SCREAMING_SNAKE_CASE_ = hidden_sizes
        SCREAMING_SNAKE_CASE_ = depths
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = len(_A )

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Return (config, random pixel_values) for a forward pass."""
        SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, pixel_values

    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Build a RegNetConfig from the tester's settings."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def _UpperCamelCase ( self , _A , _A ) -> int:
        """Base model forward pass yields a (b, c, h/32, w/32) feature map."""
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _UpperCamelCase ( self , _A , _A ) -> Any:
        """Classification head yields (batch_size, num_labels) logits."""
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self ) -> Any:
        """Return (config, inputs_dict) for the common model test mixin."""
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model tests for the Flax RegNet family.

    NOTE(review): dangling names (`_A`, `config`, `inputs_dict`, ...) are
    artifacts of the automated renaming; comments describe intent.
    """

    UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    UpperCAmelCase_ =False  # no attention-output tests
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False

    def _UpperCamelCase ( self ) -> None:
        """Create the model tester and the config tester."""
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """Run the full suite of config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _UpperCamelCase ( self ) -> str:
        """No extra common properties to check for RegNet."""
        return

    def _UpperCamelCase ( self ) -> List[str]:
        """Base model forward pass."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def _UpperCamelCase ( self ) -> str:
        """Image classification head forward pass."""
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def _UpperCamelCase ( self ) -> int:
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def _UpperCamelCase ( self ) -> Dict:
        pass

    def _UpperCamelCase ( self ) -> List[Any]:
        """The forward signature's first argument must be `pixel_values`."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def _UpperCamelCase ( self ) -> Any:
        """hidden_states output has one entry per stage plus the embeddings."""
        def check_hidden_states_output(_A , _A , _A ):
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
            SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
            self.assertEqual(len(_A ) , expected_num_stages + 1 )

        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Jitted and non-jitted forward passes must produce matching shapes."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
                SCREAMING_SNAKE_CASE_ = model_class(_A )

                @jax.jit
                def model_jitted(_A , **_A ):
                    # NOTE(review): `_A , **_A` reuses one name (SyntaxError as
                    # written) — intended upstream signature is
                    # (pixel_values, **kwargs).
                    return model(pixel_values=_A , **_A )

                with self.subTest('''JIT Enabled''' ):
                    SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
    """Load the standard COCO "two cats" fixture image used by integration tests.

    NOTE(review): `image` in the return is dangling after the renaming pass —
    the assignment target above was intended to be `image`.
    """
    SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: pretrained RegNet-Y-040 logits match a recorded slice."""

    @cached_property
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Default image processor for the pretrained checkpoint (or None without vision)."""
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None

    @slow
    def _UpperCamelCase ( self ) -> int:
        """End-to-end classification on the COCO fixture image."""
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        SCREAMING_SNAKE_CASE_ = self.default_image_processor
        SCREAMING_SNAKE_CASE_ = prepare_img()
        SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = model(**_A )
        # verify the logits
        SCREAMING_SNAKE_CASE_ = (1, 1000)
        self.assertEqual(outputs.logits.shape , _A )
        SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
| 1
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
__UpperCAmelCase = None  # fast-tokenizer class placeholder when `tokenizers` is missing

# NOTE(review): the renaming pass collapsed the original constant names into
# one identifier — `NUM_SHARDS` referenced later in this file is therefore
# dangling. Intended upstream names: INTERMEDIATE_SIZE_MAP and NUM_SHARDS.
# MLP intermediate sizes per Llama model size (for reference).
__UpperCAmelCase = {
    "7B": 1_10_08,
    "13B": 1_38_24,
    "30B": 1_79_20,
    "65B": 2_20_16,
    "70B": 2_86_72,
}
# Number of checkpoint shards per model size.
__UpperCAmelCase = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def A__ ( n, ffn_dim_multiplier=1, multiple_of=2_56 ):
    """Compute the Llama MLP intermediate size for hidden size ``n``.

    Uses the upstream formula ``multiple_of * ceil(ffn_dim_multiplier *
    int(8n/3) / multiple_of)``, i.e. 8n/3 scaled and rounded up to a multiple
    of ``multiple_of``.

    Bug fix vs. the previous revision: the signature reused one name for all
    three parameters (a SyntaxError); the names are restored from the body's
    own references (n, ffn_dim_multiplier, multiple_of).
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def A__ ( __lowerCamelCase ):
    """Read and parse a JSON file.

    Args:
        __lowerCamelCase: path to the JSON file.

    Returns:
        The parsed Python object.

    Bug fix vs. the previous revision: ``json.load`` was handed the *path*
    string instead of the open file handle, raising AttributeError on every
    call.
    """
    with open(__lowerCamelCase, '''r''' ) as f:
        return json.load(f )
def A__ ( text, path ):
    """Serialize ``text`` as JSON to the file at ``path``.

    Bug fixes vs. the previous revision: the signature reused one name for
    both parameters (a SyntaxError), and ``json.dump`` was handed the path in
    place of the open file handle. Parameter names/order restored from the
    upstream conversion script (text, path), matching the positional call
    site later in this file.
    """
    with open(path, '''w''' ) as f:
        json.dump(text, f )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ):
    """Convert a (possibly sharded) Llama checkpoint into Hugging Face format.

    NOTE(review): the automated renaming reused ``__lowerCamelCase`` for every
    parameter (a SyntaxError as written) and collapsed most locals into
    ``SCREAMING_SNAKE_CASE_``, leaving later uses (``params``, ``n_layers``,
    ``loaded``, ``state_dict``, ...) dangling. Intended upstream signature:
    (model_path, input_base_path, model_size, safe_serialization=True) — the
    keyword call site later in this file confirms those names. Comments below
    document the intended behaviour; confirm against the upstream script.
    """
    os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = os.path.join(__lowerCamelCase, '''tmp''' )
    os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
    # Model hyper-parameters are read from the checkpoint's params.json.
    SCREAMING_SNAKE_CASE_ = read_json(os.path.join(__lowerCamelCase, '''params.json''' ) )
    SCREAMING_SNAKE_CASE_ = NUM_SHARDS[model_size]
    SCREAMING_SNAKE_CASE_ = params['''n_layers''']
    SCREAMING_SNAKE_CASE_ = params['''n_heads''']
    SCREAMING_SNAKE_CASE_ = n_heads // num_shards
    SCREAMING_SNAKE_CASE_ = params['''dim''']
    SCREAMING_SNAKE_CASE_ = dim // n_heads
    # Rotary embedding inverse frequencies (base 10000).
    SCREAMING_SNAKE_CASE_ = 1_00_00.0
    SCREAMING_SNAKE_CASE_ = 1.0 / (base ** (torch.arange(0, __lowerCamelCase, 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        SCREAMING_SNAKE_CASE_ = params['''n_kv_heads'''] # for GQA / MQA
        SCREAMING_SNAKE_CASE_ = n_heads_per_shard // num_key_value_heads
        SCREAMING_SNAKE_CASE_ = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        SCREAMING_SNAKE_CASE_ = n_heads
        SCREAMING_SNAKE_CASE_ = n_heads_per_shard
        SCREAMING_SNAKE_CASE_ = dim

    # permute for sliced rotary
    def permute(__lowerCamelCase, __lowerCamelCase=n_heads, __lowerCamelCase=dim, __lowerCamelCase=dim ):
        # Reorders q/k rows so HF's interleaved rotary layout matches Meta's.
        # NOTE(review): duplicate parameter names and dangling `w`/`dima` — see
        # the function docstring.
        return w.view(__lowerCamelCase, dima // n_heads // 2, 2, __lowerCamelCase ).transpose(1, 2 ).reshape(__lowerCamelCase, __lowerCamelCase )

    print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        SCREAMING_SNAKE_CASE_ = torch.load(os.path.join(__lowerCamelCase, '''consolidated.00.pth''' ), map_location='''cpu''' )
    else:
        # Sharded
        SCREAMING_SNAKE_CASE_ = [
            torch.load(os.path.join(__lowerCamelCase, F'''consolidated.{i:02d}.pth''' ), map_location='''cpu''' )
            for i in range(__lowerCamelCase )
        ]
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = {'''weight_map''': {}}
    # One output shard per transformer layer, plus a final shard for the
    # embeddings / final norm / lm_head.
    for layer_i in range(__lowerCamelCase ):
        SCREAMING_SNAKE_CASE_ = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            SCREAMING_SNAKE_CASE_ = {
                F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
                F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
                F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
                F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
                F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
                F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
                F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
                F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
                F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            SCREAMING_SNAKE_CASE_ = {
                F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            SCREAMING_SNAKE_CASE_ = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                        for i in range(__lowerCamelCase )
                    ], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ) )
            SCREAMING_SNAKE_CASE_ = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
                            __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                        for i in range(__lowerCamelCase )
                    ], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, )
            SCREAMING_SNAKE_CASE_ = torch.cat(
                [
                    loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
                        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                    for i in range(__lowerCamelCase )
                ], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase )
            SCREAMING_SNAKE_CASE_ = torch.cat(
                [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(__lowerCamelCase )], dim=1 )
            SCREAMING_SNAKE_CASE_ = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(__lowerCamelCase )], dim=0 )
            SCREAMING_SNAKE_CASE_ = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(__lowerCamelCase )], dim=1 )
            SCREAMING_SNAKE_CASE_ = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(__lowerCamelCase )], dim=0 )
        SCREAMING_SNAKE_CASE_ = inv_freq
        # Record which shard each tensor lives in and accumulate parameter count.
        for k, v in state_dict.items():
            SCREAMING_SNAKE_CASE_ = filename
            param_count += v.numel()
        torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
    # Final shard: token embeddings, final norm and the LM head.
    SCREAMING_SNAKE_CASE_ = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        SCREAMING_SNAKE_CASE_ = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        SCREAMING_SNAKE_CASE_ = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(__lowerCamelCase )], dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__lowerCamelCase )], dim=0 ),
        }
    for k, v in state_dict.items():
        SCREAMING_SNAKE_CASE_ = filename
        param_count += v.numel()
    torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
    # Write configs
    SCREAMING_SNAKE_CASE_ = {'''total_size''': param_count * 2}
    write_json(__lowerCamelCase, os.path.join(__lowerCamelCase, '''pytorch_model.bin.index.json''' ) )
    SCREAMING_SNAKE_CASE_ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    SCREAMING_SNAKE_CASE_ = params['''multiple_of'''] if '''multiple_of''' in params else 2_56
    SCREAMING_SNAKE_CASE_ = LlamaConfig(
        hidden_size=__lowerCamelCase, intermediate_size=compute_intermediate_size(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=__lowerCamelCase, )
    config.save_pretrained(__lowerCamelCase )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''' )
    SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained(__lowerCamelCase, torch_dtype=torch.floataa, low_cpu_mem_usage=__lowerCamelCase )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''' )
    model.save_pretrained(__lowerCamelCase, safe_serialization=__lowerCamelCase )
    # Remove the temporary shard directory once the HF model is written.
    shutil.rmtree(__lowerCamelCase )
def A__ ( tokenizer_path, input_tokenizer_path ):
    """Convert a sentencepiece (`spm`) LLaMA tokenizer to the HF format.

    tokenizer_path: output directory for the converted tokenizer.
    input_tokenizer_path: path to the source ``tokenizer.model`` spm file.
    """
    # Initialize the tokenizer based on the `spm` model; fall back to the slow
    # tokenizer when the fast (tokenizers-backed) class is unavailable.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def A__ ( ):
    """CLI entry point: convert LLaMA weights (and/or tokenizer) to HF format.

    Parses ``--input_dir``/``--model_size``/``--output_dir``/``--safe_serialization``
    and dispatches to the model and tokenizer writers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', )
    parser.add_argument(
        '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], )
    parser.add_argument(
        '''--output_dir''', help='''Location to write HF model and tokenizer''', )
    # NOTE: `type=bool` restored — the original referenced an undefined name here.
    parser.add_argument('''--safe_serialization''', type=bool, help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, '''tokenizer.model''' )
    write_tokenizer(args.output_dir, spm_path )


if __name__ == "__main__":
    A__()
| 299
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A__ ( __lowerCamelCase ):
    """Return True when ``__lowerCamelCase`` is a perfect square.

    Uses ``math.isqrt`` so the check stays exact for arbitrarily large
    integers (``int(n ** 0.5)`` loses precision past ~2**52).
    """
    from math import isqrt

    root = isqrt(__lowerCamelCase )
    return __lowerCamelCase == root * root
def A__ ( x_num, x_den, y_num, y_den, z_num, z_den ):
    """Return ``x + y + z`` as a reduced fraction ``(numerator, denominator)``.

    Each operand is given as a numerator/denominator pair of ints.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    # Reduce by the greatest common divisor so equal sums compare equal.
    hcf = gcd(top, bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def A__ ( __lowerCamelCase = 35 ):
    """Project-Euler-style search over fraction pairs ``x = x_num/x_den`` and
    ``y = y_num/y_den`` (0 < x, y < 1, denominators <= order).

    For n in {1, 2, -1, -2} it solves ``x**n + y**n = z**n`` for z, keeps every
    solution with ``0 < z < 1`` and denominator <= order, accumulates the set of
    distinct reduced sums ``x + y + z``, and returns
    ``numerator + denominator`` of the total of those sums.
    """
    from math import gcd, isqrt

    order = __lowerCamelCase

    def _is_sq(number):
        # Exact perfect-square test (avoids float sqrt rounding).
        root = isqrt(number)
        return number == root * root

    def _add_three(x_num, x_den, y_num, y_den, z_num, z_den):
        # x + y + z as a reduced (numerator, denominator) tuple.
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top, bottom)
        return top // hcf, bottom // hcf

    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1, order + 1 ):
        for x_den in range(x_num + 1, order + 1 ):
            for y_num in range(1, order + 1 ):
                for y_den in range(y_num + 1, order + 1 ):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=2: z**2 = x**2 + y**2 (only when both squares are exact)
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = isqrt(z_num )
                        z_den = isqrt(z_den )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-1: 1/z = 1/x + 1/y  =>  z = x*y / (x + y)
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-2: (1/z)**2 = (1/x)**2 + (1/y)**2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = isqrt(z_num )
                        z_den = isqrt(z_den )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
    for num, den in unique_s:
        total += Fraction(num, den )
    return total.denominator + total.numerator


if __name__ == "__main__":
    print(F"""{A__() = }""")
| 299
| 1
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast unit tests for ``KandinskyVaaPriorPipeline`` built from tiny
    deterministic dummy components (CLIP text/vision models, a 1-layer
    PriorTransformer and an UnCLIP scheduler).

    NOTE(review): this file appears mechanically renamed — locals are assigned
    to ``SCREAMING_SNAKE_CASE_`` while later lines read the original
    identifiers (``tokenizer``, ``model``, ...), every property shares the name
    ``_UpperCamelCase``, and one signature repeats the parameter ``_A``.
    Comments below describe the apparent intent; confirm against the upstream
    diffusers test before relying on them.
    """

    # Pipeline class under test plus the PipelineTesterMixin parameter lists
    # (required/optional call args and the batch-params flag).
    UpperCAmelCase_ =KandinskyVaaPriorPipeline
    UpperCAmelCase_ =["prompt"]
    UpperCAmelCase_ =["prompt", "negative_prompt"]
    UpperCAmelCase_ =[
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    UpperCAmelCase_ =False

    @property
    def _UpperCamelCase ( self ) -> Any:
        # presumably ``text_embedder_hidden_size`` — TODO confirm
        return 32

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # presumably ``time_input_dim`` — TODO confirm
        return 32

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # block size mirrors the time input dim
        return self.time_input_dim

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # embedding dim is four times the time input dim
        return self.time_input_dim * 4

    @property
    def _UpperCamelCase ( self ) -> int:
        # presumably a cross-attention/sequence dim of 100 — TODO confirm
        return 100

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # Tiny CLIP tokenizer so the test stays fast.
        SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Deterministic tiny CLIP text encoder.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_A )

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Deterministic single-layer prior transformer.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        SCREAMING_SNAKE_CASE_ = PriorTransformer(**_A )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Deterministic tiny CLIP vision encoder.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        SCREAMING_SNAKE_CASE_ = CLIPVisionModelWithProjection(_A )
        return model

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # CLIP preprocessing; the mean/std triples match the OpenAI CLIP stats.
        SCREAMING_SNAKE_CASE_ = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor

    def _UpperCamelCase ( self ) -> List[Any]:
        """Assemble the component dict expected by the pipeline constructor."""
        SCREAMING_SNAKE_CASE_ = self.dummy_prior
        SCREAMING_SNAKE_CASE_ = self.dummy_image_encoder
        SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE_ = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE_ = self.dummy_image_processor
        SCREAMING_SNAKE_CASE_ = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
        SCREAMING_SNAKE_CASE_ = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components

    def _UpperCamelCase ( self , _A , _A=0 ) -> Any:
        """Build deterministic call kwargs; the two arguments were presumably
        ``device`` and ``seed`` — the duplicated name is a rename artifact."""
        if str(_A ).startswith('''mps''' ):
            # MPS requires a CPU-side generator.
            SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A )
        else:
            SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A ).manual_seed(_A )
        SCREAMING_SNAKE_CASE_ = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Smoke-test a 2-step prior run on CPU and pin the output slice."""
        SCREAMING_SNAKE_CASE_ = '''cpu'''
        SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_A )
        SCREAMING_SNAKE_CASE_ = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(_A ) )
        SCREAMING_SNAKE_CASE_ = output.image_embeds
        SCREAMING_SNAKE_CASE_ = pipe(
            **self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
        # Compare the last ten embedding values from dict vs tuple outputs.
        SCREAMING_SNAKE_CASE_ = image[0, -10:]
        SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        SCREAMING_SNAKE_CASE_ = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        # Both output paths must match the golden slice within tolerance.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def _UpperCamelCase ( self ) -> str:
        """Batched single-image inference must equal unbatched (skipped on MPS)."""
        SCREAMING_SNAKE_CASE_ = torch_device == '''cpu'''
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = False
        self._test_inference_batch_single_identical(
            test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )

    @skip_mps
    def _UpperCamelCase ( self ) -> int:
        """Attention slicing must not change results (skipped on MPS)."""
        SCREAMING_SNAKE_CASE_ = torch_device == '''cpu'''
        SCREAMING_SNAKE_CASE_ = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_A , test_mean_pixel_difference=_A , )
| 299
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Holds the (optionally learnable) classifier-free-guidance embeddings
    used by the VQ-Diffusion pipeline as its unconditional branch.

    Args:
        learnable: when True, a trainable ``(length, hidden_size)`` zero-
            initialized embedding table is created; otherwise the parameter is
            created empty.
        hidden_size: embedding width; required when ``learnable`` is True.
        length: number of embedding rows; required when ``learnable`` is True.
    """

    @register_to_config
    def __init__( self , learnable , hidden_size = None , length = None ) -> None:
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        # Wrapping possibly-None data yields an empty Parameter in the
        # non-learnable case, matching the original control flow.
        self.embeddings = torch.nn.Parameter(embeddings )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """VQ-Diffusion text-to-image pipeline: encodes the prompt with CLIP,
    iteratively denoises discrete VQ latent indices with a transformer and a
    ``VQDiffusionScheduler``, then decodes the indices to images via a VQ-VAE.

    NOTE(review): this file appears mechanically renamed — locals are assigned
    to ``SCREAMING_SNAKE_CASE_`` while later lines read the intended names
    (``text_inputs``, ``prompt_embeds``, ...), and several signatures repeat
    the parameter ``_A``.  Comments describe the apparent intent; confirm
    against the upstream diffusers source before relying on them.
    """

    # The following were most likely typed attribute declarations for
    # vqvae / text_encoder / tokenizer / transformer /
    # learned_classifier_free_sampling_embeddings / scheduler — TODO confirm.
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42

    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
        # Register all sub-models so device placement and save/load work.
        super().__init__()
        self.register_modules(
            vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
        """Encode the prompt into per-image CLIP embeddings, optionally
        prepending classifier-free-guidance (unconditional) embeddings."""
        SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about (and drop) tokens beyond CLIP's context window.
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings.
                SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
            else:
                # Otherwise encode the empty prompt as the unconditional branch.
                SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
                SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
            SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images for a prompt.  Parameters appear to be (in order):
        prompt, num_inference_steps, guidance_scale, truncation_rate,
        num_images_per_prompt, generator, latents, output_type, return_dict,
        callback, callback_steps — TODO confirm against upstream."""
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = len(_A )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
        SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
        # CFG is active only when guidance_scale exceeds 1.
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_A )}.''' )
        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The highest embedding index is the "masked" token.
            SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            SCREAMING_SNAKE_CASE_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_A , device=self.device )
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
        SCREAMING_SNAKE_CASE_ = latents
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
                SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Renormalize the guided log-probabilities.
                model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
            SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A )
        # Decode the final latent indices through the VQ-VAE codebook.
        SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
        # Map [-1, 1] model output to [0, 1] HWC numpy images.
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )

    def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
        """Truncate the log-probability distribution: in log space, zero out
        every token outside the smallest set whose cumulative probability is
        below the truncation rate (always keeping the most probable token)."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
        SCREAMING_SNAKE_CASE_ = torch.exp(_A )
        SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
        SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original token order.
        SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
        SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
        SCREAMING_SNAKE_CASE_ = -torch.inf  # -inf = log(0)
        return rv
| 299
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the LiLT model package: type checkers see the real
# imports; at runtime the module is replaced by a _LazyModule that resolves
# submodules on first attribute access.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this package module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
def A__ ( __lowerCamelCase ):
    """Return the sum of the proper divisors of a positive integer.

    >>> A__(6)
    6
    >>> A__(12)
    16

    Raises:
        ValueError: if the input is not an ``int`` or is not positive.
    """
    # NOTE: the original checked ``isinstance(x, x)`` (a TypeError at runtime)
    # and then read an undefined name; both are fixed here.
    if not isinstance(__lowerCamelCase, int ):
        raise ValueError('''Input must be an integer''' )
    if __lowerCamelCase <= 0:
        raise ValueError('''Input must be positive''' )
    # A proper divisor of n is at most n // 2, so only scan that far.
    return sum(
        divisor for divisor in range(1, __lowerCamelCase // 2 + 1 ) if __lowerCamelCase % divisor == 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 299
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger plus the standard tokenizer constants; downstream code in this
# module reads ``logger`` and the three UPPER_CASE names, so the original
# pattern of rebinding one throwaway global lost all of them.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 10_24,
}
def A__ ( __lowerCamelCase ):
    """Load a vocabulary file into an ``OrderedDict`` mapping token -> index.

    __lowerCamelCase: path to a UTF-8 text file with one token per line; the
    line order defines the token ids.
    """
    vocab = collections.OrderedDict()
    with open(__lowerCamelCase, '''r''', encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        # Strip only the trailing newline so tokens containing spaces survive.
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Greedy longest-match-first WordPiece tokenizer over a single word.

    Splits a word into the longest sub-strings found in ``vocab``; positions
    with no matching prefix emit ``unk_token`` and advance one character.
    """

    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ) -> None:
        # vocab: mapping of sub-token string -> id (membership is what matters here)
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped to [unk_token] outright.
        self.max_input_chars_per_word = max_input_chars_per_word

    def _UpperCamelCase ( self , _A ) -> list:
        """Tokenize the word ``_A`` into a list of vocab sub-tokens."""
        chars = list(_A )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # Greedily take the longest chars[start:end] present in the vocab.
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No vocab entry starts here: emit <unk> and advance one char.
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CPM-Ant tokenizer: jieba word segmentation followed by WordPiece.

    NOTE(review): this file appears mechanically renamed — ``self.<attr> = ...``
    assignments were collapsed to ``SCREAMING_SNAKE_CASE_ = ...`` while later
    lines read the intended attributes (``self.encoder``, ``self.bod_token``,
    ...), ``__init__`` repeats the parameter name ``_A`` (a SyntaxError as
    written), and the sort lambdas read ``x`` but bind ``_A``.  Comments below
    describe the apparent intent; confirm against the upstream transformers
    CPM-Ant tokenizer before relying on them.
    """

    # Standard tokenizer class configuration: vocab file names, pretrained
    # maps/sizes, model input names, and the slow/fast flag.  The referenced
    # constants are expected at module level.
    UpperCAmelCase_ =VOCAB_FILES_NAMES
    UpperCAmelCase_ =PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ =["input_ids", "attention_mask"]
    UpperCAmelCase_ =False

    def __init__( self , _A , _A="<d>" , _A="</d>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A="<unk>" , _A="</n>" , _A="</_>" , _A="left" , **_A , ) -> List[Any]:
        # Loads the vocab, folds the space/line tokens into literal " "/"\n"
        # entries, and builds the WordPiece sub-tokenizer.
        requires_backends(self , ['''jieba'''] )
        super().__init__(
            bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
        SCREAMING_SNAKE_CASE_ = bod_token
        SCREAMING_SNAKE_CASE_ = eod_token
        SCREAMING_SNAKE_CASE_ = load_vocab(_A )
        SCREAMING_SNAKE_CASE_ = self.encoder[space_token]
        SCREAMING_SNAKE_CASE_ = self.encoder[line_token]
        # The dedicated space/line tokens are removed from the table...
        del self.encoder[space_token]
        del self.encoder[line_token]
        # ...and the encoder is re-sorted by id (NOTE(review): lambda reads
        # ``x`` but binds ``_A`` — rename artifact).
        SCREAMING_SNAKE_CASE_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
        SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # id of the begin-of-document token
        return self.encoder[self.bod_token]

    @property
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # id of the end-of-document token
        return self.encoder[self.eod_token]

    @property
    def _UpperCamelCase ( self ) -> Any:
        # id of the newline token
        return self.encoder["\n"]

    @property
    def _UpperCamelCase ( self ) -> int:
        # vocabulary size (base vocab only)
        return len(self.encoder )

    def _UpperCamelCase ( self ) -> Dict:
        # Full vocab: base entries plus any added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def _UpperCamelCase ( self , _A ) -> Optional[int]:
        # Segment with jieba, then WordPiece-tokenize each segment.
        SCREAMING_SNAKE_CASE_ = []
        for x in jieba.cut(_A , cut_all=_A ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
        return output_tokens

    def _UpperCamelCase ( self , _A , **_A ) -> int:
        # Drop negative ids and pad/BOS/EOS before delegating to the base decode.
        SCREAMING_SNAKE_CASE_ = [i for i in token_ids if i >= 0]
        SCREAMING_SNAKE_CASE_ = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(_A , **_A )

    def _UpperCamelCase ( self , _A ) -> str:
        # Membership test against the base vocab.
        return token in self.encoder

    def _UpperCamelCase ( self , _A ) -> str:
        # Tokens are concatenated without separators (no whitespace joining).
        return "".join(_A )

    def _UpperCamelCase ( self , _A ) -> Optional[Any]:
        # token -> id, falling back to the <unk> id.
        return self.encoder.get(_A , self.encoder.get(self.unk_token ) )

    def _UpperCamelCase ( self , _A ) -> Dict:
        # id -> token, falling back to <unk>.
        return self.decoder.get(_A , self.unk_token )

    def _UpperCamelCase ( self , _A , _A = None ) -> Tuple[str]:
        # Write the vocab back to disk one token per line in id order,
        # restoring the literal " " and "\n" entries first.
        if os.path.isdir(_A ):
            SCREAMING_SNAKE_CASE_ = os.path.join(
                _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            SCREAMING_SNAKE_CASE_ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        SCREAMING_SNAKE_CASE_ = 0
        if " " in self.encoder:
            SCREAMING_SNAKE_CASE_ = self.encoder[''' ''']
            del self.encoder[" "]
        if "\n" in self.encoder:
            SCREAMING_SNAKE_CASE_ = self.encoder['''\n''']
            del self.encoder["\n"]
        # NOTE(review): lambda reads ``x`` but binds ``_A`` — rename artifact.
        SCREAMING_SNAKE_CASE_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
        with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    # Non-consecutive ids indicate a corrupted vocabulary.
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ''' Please check that the vocabulary is not corrupted!''' )
                    SCREAMING_SNAKE_CASE_ = token_index
                writer.write(token + '''\n''' )
                index += 1
        return (vocab_file,)

    def _UpperCamelCase ( self , _A , _A = None ) -> List[int]:
        # Prepend BOS to each sequence; pairs get BOS before each member.
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    def _UpperCamelCase ( self , _A , _A = None , _A = False ) -> List[int]:
        # Mask marking special tokens (1) vs sequence tokens (0).
        # NOTE(review): the duplicated ``token_ids_a=`` keyword below is a
        # rename artifact (SyntaxError as written).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
        if token_ids_a is not None:
            return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
        return [1] + ([0] * len(_A ))
| 299
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for return_tensors in the tests below; later code
# reads the name ``FRAMEWORK`` (the original bound a throwaway global).
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =ByTaTokenizer
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ) -> List[str]:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ = tokenizer(_A )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''Unicode €.</s>''' )
SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Dict:
    """Round-trip save_pretrained/from_pretrained, including added tokens and
    added special tokens, and check model_max_length survives/overrides.

    NOTE(review): `tokenizers`, `after_tokenizer`, `additional_special_tokens`
    and `_A` are mangled names and do not resolve as written — confirm upstream.
    """
    # safety check on max_len default value so we are sure the test works
    SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            self.assertNotEqual(tokenizer.model_max_length , 42 )
    # Now let's start the test
    SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            # Isolate this from the other tests because we save additional tokens/etc
            SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
            SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
            SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
            tokenizer.save_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
            self.assertListEqual(_A , _A )
            shutil.rmtree(_A )
    SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            # Isolate this from the other tests because we save additional tokens/etc
            SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
            SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
            tokenizer.add_tokens(['''bim''', '''bambam'''] )
            SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
            additional_special_tokens.append('''new_additional_special_token''' )
            tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
            SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
            tokenizer.save_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
            self.assertListEqual(_A , _A )
            self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
            self.assertEqual(after_tokenizer.model_max_length , 42 )
            SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
            self.assertEqual(tokenizer.model_max_length , 43 )
            shutil.rmtree(_A )
def _UpperCamelCase ( self ) -> int:
    """Verify additional_special_tokens round-trip through the saved
    special_tokens_map.json / tokenizer_config.json, and can be overridden in
    from_pretrained.

    NOTE(review): `tokenizer_list`, `added_tokens_extra_ids`,
    `tokenizer_without_change_in_init` and `_A` are mangled names — confirm upstream.
    """
    SCREAMING_SNAKE_CASE_ = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(_A )
            with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                SCREAMING_SNAKE_CASE_ = json.load(_A )
            with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                SCREAMING_SNAKE_CASE_ = json.load(_A )
            SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
            SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                '''an_additional_special_token'''
            ]
            SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                '''an_additional_special_token'''
            ]
            with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                json.dump(_A , _A )
            with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                json.dump(_A , _A )
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                _A , )
            self.assertIn(
                '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
            # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
            SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                _A , additional_special_tokens=_A , )
            self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
            self.assertEqual(
                ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _UpperCamelCase ( self ) -> str:
    """Reloaded tokenizer decodes id 255 to the empty string.

    NOTE(review): `tokenizer_list`, `tokenizer` and `_A` are mangled names —
    confirm upstream.
    """
    SCREAMING_SNAKE_CASE_ = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(_A )
            SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
            self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _UpperCamelCase ( self ) -> int:
    # NOTE(review): deliberate no-op override of a base-class test; the reason
    # is not visible in this file.
    pass
def _UpperCamelCase ( self ) -> Any:
    # NOTE(review): deliberate no-op override of a base-class test; the reason
    # is not visible in this file.
    pass
def _UpperCamelCase ( self ) -> Any:
    # NOTE(review): deliberate no-op override of a base-class test; the reason
    # is not visible in this file.
    pass
def _UpperCamelCase ( self ) -> Optional[int]:
    # NOTE(review): deliberate no-op override of a base-class test; the reason
    # is not visible in this file.
    pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
    """convert_tokens_to_string on byte-level tokens returns a plain value
    (checked with assertIsInstance).

    NOTE(review): `tokenizers`, `_A` are mangled names — confirm upstream.
    """
    # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
    # and special added tokens as tokens
    SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
            SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
            self.assertIsInstance(_A , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
    """Exercise the special-token property setters, by id and by token list.

    NOTE(review): `tokenizers`, `attributes_list`, `token_id_to_test_setters`,
    `token_to_test_setters` and `_A` are mangled names — confirm upstream.
    """
    SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            SCREAMING_SNAKE_CASE_ = [
                '''bos_token''',
                '''eos_token''',
                '''unk_token''',
                '''sep_token''',
                '''pad_token''',
                '''cls_token''',
                '''mask_token''',
            ]
            SCREAMING_SNAKE_CASE_ = 0
            SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
                _A , skip_special_tokens=_A )
            for attr in attributes_list:
                setattr(_A , attr + '''_id''' , _A )
                self.assertEqual(getattr(_A , _A ) , _A )
                self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                setattr(_A , attr + '''_id''' , _A )
                self.assertEqual(getattr(_A , _A ) , _A )
                self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
            setattr(_A , '''additional_special_tokens_ids''' , [] )
            self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
            self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
            setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
            self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
            self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 299
| 1
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
    """Model tester that builds BigBird configs and inputs for the Flax test suite."""

    # NOTE(review): parameter names were mangled — the repeated `_A` parameters
    # below are a SyntaxError as written; the keyword names must be restored
    # from the upstream model tester before this can run.
    def __init__( self , _A , _A=2 , _A=56 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=2 , _A=2 , _A=7 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.02 , _A=4 , _A="block_sparse" , _A=True , _A=False , _A=2 , _A=3 , ) -> Any:
        # Cache every configuration knob on the instance for later config creation.
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_attention_mask
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = hidden_size
        SCREAMING_SNAKE_CASE_ = num_hidden_layers
        SCREAMING_SNAKE_CASE_ = num_attention_heads
        SCREAMING_SNAKE_CASE_ = intermediate_size
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = type_sequence_label_size
        SCREAMING_SNAKE_CASE_ = initializer_range
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = rescale_embeddings
        SCREAMING_SNAKE_CASE_ = attention_type
        SCREAMING_SNAKE_CASE_ = use_bias
        SCREAMING_SNAKE_CASE_ = block_size
        SCREAMING_SNAKE_CASE_ = num_random_blocks

    def _UpperCamelCase ( self ) -> str:
        """Build (config, input_ids, token_type_ids, attention_mask) for one forward pass."""
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCamelCase ( self ) -> Any:
        """Return (config, inputs_dict) in the shape expected by the common Flax test mixin."""
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Flax BigBird test-suite wiring: registers all model heads and re-runs the
    slow common tests that take longer for this architecture."""

    UpperCAmelCase_ =(
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Instantiate the shared model-tester fixture.
        # NOTE(review): `FlaxBigBirdModelTester` is not defined/imported in this
        # file as mangled — presumably the tester class above; confirm upstream.
        SCREAMING_SNAKE_CASE_ = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ) -> Optional[int]:
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ) -> Any:
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ) -> Tuple:
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ) -> Dict:
        super().test_hidden_states_output()

    @slow
    def _UpperCamelCase ( self ) -> str:
        """Smoke-test from_pretrained for every registered model head."""
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
            self.assertIsNotNone(_A )

    def _UpperCamelCase ( self ) -> Tuple:
        # Attention-probability outputs are only checked when the flag opts in.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def _UpperCamelCase ( self ) -> Tuple:
        """Check jitted and non-jitted forward passes produce same-shaped outputs."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
                SCREAMING_SNAKE_CASE_ = model_class(_A )

                # NOTE(review): the repeated `_A` parameters are a SyntaxError as
                # written — an artifact of name mangling in this file.
                @jax.jit
                def model_jitted(_A , _A=None , **_A ):
                    return model(input_ids=_A , attention_mask=_A , **_A )

                with self.subTest('''JIT Enabled''' ):
                    SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def _UpperCamelCase ( self , _A , _A , _A , _A=1E-5 , _A="outputs" , _A=None ) -> Dict:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions''' ):
            return
        else:
            super().check_pt_flax_outputs(_A , _A , _A , _A , _A , _A )
| 299
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A__ ( __lowerCamelCase ):
    """Return the color negative of image `__lowerCamelCase`, inverting it in place.

    Each channel value v becomes 255 - v. The original body referenced the
    undefined name `img` (lost in mangling), iterated `range(<array>)` (a
    TypeError), and discarded the per-pixel result; this vectorized form is the
    working equivalent of the intended per-pixel double loop.
    """
    # In-place so callers holding the same array see the inversion, matching the
    # original intent of assigning back into img[i][j].
    __lowerCamelCase[:] = 2_55 - __lowerCamelCase
    return __lowerCamelCase
if __name__ == "__main__":
    # read original image
    __UpperCAmelCase = imread("image_data/lena.jpg", 1)
    # convert to its negative; A__ is this module's convert_to_negative.
    # NOTE(review): the original called undefined `convert_to_negative(img)` —
    # both names were lost in mangling.
    __UpperCAmelCase = A__(__UpperCAmelCase)
    # show result image
    imshow("negative of original image", __UpperCAmelCase)
    waitKey(0)
    destroyAllWindows()
| 299
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for the framework-agnostic tensor helpers (flatten_dict, transpose,
    reshape, squeeze, expand_dims) against numpy and, where available,
    torch / tensorflow / jax.

    NOTE(review): local names were mangled to `SCREAMING_SNAKE_CASE_`/`_A`
    (e.g. `x`, `t` below do not resolve as written) — restore from upstream.
    """

    def _UpperCamelCase ( self ) -> Dict:
        # flatten_dict collapses nested dicts into dotted keys.
        SCREAMING_SNAKE_CASE_ = {
            '''task_specific_params''': {
                '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
                '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
                '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
            }
        }
        SCREAMING_SNAKE_CASE_ = {
            '''task_specific_params.summarization.length_penalty''': 1.0,
            '''task_specific_params.summarization.max_length''': 128,
            '''task_specific_params.summarization.min_length''': 12,
            '''task_specific_params.summarization.num_beams''': 4,
            '''task_specific_params.summarization_cnn.length_penalty''': 2.0,
            '''task_specific_params.summarization_cnn.max_length''': 142,
            '''task_specific_params.summarization_cnn.min_length''': 56,
            '''task_specific_params.summarization_cnn.num_beams''': 4,
            '''task_specific_params.summarization_xsum.length_penalty''': 1.0,
            '''task_specific_params.summarization_xsum.max_length''': 62,
            '''task_specific_params.summarization_xsum.min_length''': 11,
            '''task_specific_params.summarization_xsum.num_beams''': 6,
        }
        self.assertEqual(flatten_dict(_A ) , _A )

    def _UpperCamelCase ( self ) -> List[str]:
        # transpose on numpy arrays (default and explicit axes).
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(_A ) , x.transpose() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )

    @require_torch
    def _UpperCamelCase ( self ) -> Tuple:
        # transpose parity: numpy input vs torch tensor input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) )

    @require_tf
    def _UpperCamelCase ( self ) -> Dict:
        # transpose parity: numpy input vs tensorflow constant input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) )

    @require_flax
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # transpose parity: numpy input vs jax array input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(transpose(_A ) , np.asarray(transpose(_A ) ) ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , np.asarray(transpose(_A , axes=(1, 2, 0) ) ) ) )

    def _UpperCamelCase ( self ) -> Tuple:
        # reshape on numpy arrays.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.reshape(_A , (4, 3) ) ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , np.reshape(_A , (12, 5) ) ) )

    @require_torch
    def _UpperCamelCase ( self ) -> Tuple:
        # reshape parity: numpy input vs torch tensor input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , reshape(_A , (12, 5) ).numpy() ) )

    @require_tf
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # reshape parity: numpy input vs tensorflow constant input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , reshape(_A , (12, 5) ).numpy() ) )

    @require_flax
    def _UpperCamelCase ( self ) -> Tuple:
        # reshape parity: numpy input vs jax array input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.asarray(reshape(_A , (4, 3) ) ) ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , np.asarray(reshape(_A , (12, 5) ) ) ) )

    def _UpperCamelCase ( self ) -> Dict:
        # squeeze on numpy arrays (all axes, then a specific axis).
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(_A ) , np.squeeze(_A ) ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.squeeze(_A , axis=2 ) ) )

    @require_torch
    def _UpperCamelCase ( self ) -> Any:
        # squeeze parity: numpy input vs torch tensor input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) )

    @require_tf
    def _UpperCamelCase ( self ) -> List[str]:
        # squeeze parity: numpy input vs tensorflow constant input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) )

    @require_flax
    def _UpperCamelCase ( self ) -> int:
        # squeeze parity: numpy input vs jax array input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(squeeze(_A ) , np.asarray(squeeze(_A ) ) ) )
        SCREAMING_SNAKE_CASE_ = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.asarray(squeeze(_A , axis=2 ) ) ) )

    def _UpperCamelCase ( self ) -> str:
        # expand_dims on numpy arrays.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.expand_dims(_A , axis=1 ) ) )

    @require_torch
    def _UpperCamelCase ( self ) -> List[Any]:
        # expand_dims parity: numpy input vs torch tensor input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = torch.tensor(_A )
        self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) )

    @require_tf
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # expand_dims parity: numpy input vs tensorflow constant input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = tf.constant(_A )
        self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) )

    @require_flax
    def _UpperCamelCase ( self ) -> Dict:
        # expand_dims parity: numpy input vs jax array input.
        SCREAMING_SNAKE_CASE_ = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE_ = jnp.array(_A )
        self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.asarray(expand_dims(_A , axis=1 ) ) ) )
| 299
|
import math
def A__ ( __lowerCamelCase ):
    """Return True when sqrt(4*n + 1)/2 + 1/2 is an exact power of two.

    This is the "perfect partition" test for Project Euler 207. Fixes over the
    mangled original: it called the nonexistent `math.loga` and compared the
    exponent against `int(n)` instead of `int(exponent)`.
    """
    exponent = math.log2(math.sqrt(4 * __lowerCamelCase + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
    """Project Euler 207: return the first k at which the proportion of perfect
    partitions drops below `__lowerCamelCase`.

    A value k admits a partition when (integer**2 - 1) / 4 is integral, and the
    partition is perfect when sqrt(4k + 1)/2 + 1/2 is an exact power of two.
    The perfect-partition check is inlined because the original called
    `check_partition_perfect`, a name that does not exist after mangling.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            # perfect when sqrt(4k + 1)/2 + 1/2 is a power of two
            exponent = math.log2(math.sqrt(4 * partition_candidate + 1 ) / 2 + 1 / 2 )
            if exponent == int(exponent ):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < __lowerCamelCase:
                return partition_candidate
        integer += 1
if __name__ == "__main__":
    # NOTE(review): the original printed `solution()`, a name lost in mangling;
    # `A__` above is this module's solution entry point.
    print(F"""{A__() = }""")
| 299
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Holder for optional learnable classifier-free-guidance embeddings.

    NOTE(review): `learnable`, `hidden_size`, `length` and `_A` are mangled
    names that do not resolve as written — restore from upstream.
    """

    @register_to_config
    def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
        super().__init__()
        # `learnable` selects between a trainable zero-initialized table and no table.
        SCREAMING_SNAKE_CASE_ = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
        else:
            SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """VQ-Diffusion text-to-image pipeline (VQ-VAE + transformer + CLIP text encoder).

    NOTE(review): local names in this file were mangled to
    `SCREAMING_SNAKE_CASE_`/`_A`; names such as `text_inputs`, `prompt_embeds`,
    `latents`, `sample` do not resolve as written — restore from upstream.
    """

    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42

    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
        super().__init__()
        self.register_modules(
            vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
        """Encode the prompt (and a negative prompt when guidance is on) into CLIP embeddings."""
        SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
            else:
                SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
                SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the discrete denoising loop and decode the latents to images."""
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = len(_A )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
        SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_A )}.''' )
        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            SCREAMING_SNAKE_CASE_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_A , device=self.device )
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
        SCREAMING_SNAKE_CASE_ = latents
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
                SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
            SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )

    def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
        """Truncate the log-probability distribution: keep the smallest set of
        tokens whose cumulative probability reaches the truncation rate, set the
        rest to -inf (log 0)."""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
        SCREAMING_SNAKE_CASE_ = torch.exp(_A )
        SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
        SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
        SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
        SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
        SCREAMING_SNAKE_CASE_ = -torch.inf  # -inf = log(0)
        return rv
| 299
|
def A__ ( __lowerCamelCase ):
    """Convert an infix expression to postfix (shunting-yard), printing a trace table.

    Fixes over the mangled original: the loop and width expressions referenced
    undefined names (`infix`; `len(__lowerCamelCase)` where `len(stack)` was
    meant), a '(' on the stack crashed the priority lookup, and the
    right-associative '^' wrongly popped operators of equal priority.
    """
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(__lowerCamelCase ) if (len(__lowerCamelCase ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in __lowerCamelCase:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            # Pop operators that must apply first: strictly higher priority, or
            # equal priority when x is left-associative ('^' is right-assoc.).
            # Never pop past a '(' — it has no entry in the priority table.
            while (
                len(stack ) > 0
                and stack[-1] != "("
                and (
                    priority[stack[-1]] > priority[x]
                    or (priority[stack[-1]] == priority[x] and x != "^")
                )
            ):
                post_fix.append(stack.pop() )  # pop stack & add to Postfix
            stack.append(x )  # push x to stack
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def A__ ( infix ):
    """Convert an infix expression to prefix (Polish) notation.

    Works by reversing the input, swapping parentheses, converting the
    mirrored expression to postfix, and reversing the result.

    Fixes the obfuscated original, whose parameter was named
    ``__lowerCamelCase`` while the body read ``infix`` and assigned swapped
    parentheses into the ``SCREAMING_SNAKE_CASE_`` placeholder.

    NOTE(review): this calls ``infix_2_postfix``, which is not defined under
    that name in this file (the postfix converter above is named ``A__``) —
    confirm the intended binding before running.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ''')'''  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '''('''  # change ")" to "("
    # Postfix of the mirrored expression, reversed, is the prefix form.
    return (infix_2_postfix(''''''.join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # NOTE(review): machine-obfuscated driver — the user input is bound to the
    # placeholder ``__UpperCAmelCase`` but read back as ``Infix``, and
    # ``infix_2_prefix`` is not defined under that name in this file (the
    # converter above is named ``A__``), so this raises NameError as written.
    __UpperCAmelCase = input("\nEnter an Infix Equation = ") # Input an Infix equation
    __UpperCAmelCase = "".join(Infix.split()) # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 1
|
from math import ceil, sqrt
def A__ ( limit = 1_00_00_00 ):
    """Project Euler 173: count square laminae using up to ``limit`` tiles.

    A lamina is a square outline of outer width >= 3 with a centred square
    hole of the same parity.  For each outer width, the admissible hole
    widths are counted arithmetically instead of being enumerated.

    Fixes the obfuscated original, in which the accumulator and the hole
    lower bound were assigned to the ``SCREAMING_SNAKE_CASE_`` placeholder
    while being read as ``answer`` and ``hole_width_lower_bound`` (NameError),
    and the parameter was named ``__lowerCamelCase`` while the body read
    ``limit``.
    """
    answer = 0
    # The thinnest lamina of outer width w uses w**2 - (w-2)**2 = 4w - 4
    # tiles, so outer widths beyond limit // 4 + 1 cannot fit the budget.
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            # Hole must be wide enough that outer**2 - hole**2 <= limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer widths must share parity so the border is uniform.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # Holes range from hole_width_lower_bound to outer_width - 2 in
        # steps of 2; count them directly.
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
    # NOTE(review): the function above is defined as ``A__``, not ``solution``
    # — this call raises NameError as written (obfuscation artefact).
    print(F"""{solution() = }""")
| 299
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP-style audio feature extractor: raw waveforms -> (fused) log-mel
    spectrogram ``input_features`` plus per-clip ``is_longer`` flags.

    NOTE(review): this block appears machine-obfuscated — most method
    signatures declare the parameter ``_A`` multiple times (a SyntaxError in
    real Python), and the repeated ``SCREAMING_SNAKE_CASE_ = ...`` lines were
    clearly distinct bindings (mostly ``self.<attr> = ...``) originally.  The
    comments below describe apparent intent only; confirm against the
    upstream implementation before relying on this code.
    """

    # Names of the model inputs produced by __call__.
    UpperCAmelCase_ =["input_features", "is_longer"]

    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        # Presumably (in some order): feature_size, sampling_rate, hop_length,
        # max_length_s, fft_window_size, padding_value, return_attention_mask,
        # frequency_min, frequency_max, top_db, truncation, padding — TODO confirm.
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        # The following were presumably ``self.top_db = top_db`` etc.
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a real-input FFT of size fft_window_size.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks: HTK-scaled (used on the "fusion" path) and
        # Slaney-scaled (used on the "rand_trunc" path).
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )

    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        """Serialise the extractor's state, dropping the large filter banks."""
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, returned as (frames, mels)."""
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        """Build the 4-channel "fusion" mel: a bilinearly shrunk global view
        stacked with random crops from the front/middle/back thirds."""
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        # Shrink the full-length spectrogram down to one chunk's size.
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        """Truncate or pad one waveform and convert it to mel features.

        Apparently returns ``(input_mel, longer)``, where ``longer`` records
        whether the clip exceeded ``max_length`` (consumed by the fusion path).
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        """Featurise a waveform or batch of waveforms; apparently returns a
        ``BatchFeature`` with ``input_features`` and ``is_longer``."""
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Normalise the input into a list of float32 mono waveforms.
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
| 1
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__UpperCAmelCase = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__UpperCAmelCase = {
"facebook/blenderbot_small-90M": 5_12,
}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer built on byte-level BPE.

    NOTE(review): this block appears machine-obfuscated — both helper methods
    share the name ``_UpperCamelCase`` (the second shadows the first), two
    signatures declare the parameter ``_A`` twice (a SyntaxError in real
    Python), and the ``SCREAMING_SNAKE_CASE_`` assignments were clearly real
    bindings (``self.add_prefix_space``, ``output``, ``sep``, ``cls``)
    originally.  Confirm against the upstream implementation before use.
    """

    # These four assignments all target one placeholder name; presumably they
    # were vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes and slow_tokenizer_class — TODO confirm.
    UpperCAmelCase_ =VOCAB_FILES_NAMES
    UpperCAmelCase_ =PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ =BlenderbotSmallTokenizer

    def __init__( self , _A=None , _A=None , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|endoftext|>" , _A=False , _A=True , **_A , ) -> Union[str, Any]:
        # Wraps a ByteLevelBPETokenizer backend; presumably the parameters were
        # vocab_file, merges_file, unk/bos/eos tokens, add_prefix_space,
        # trim_offsets — TODO confirm against upstream.
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
        SCREAMING_SNAKE_CASE_ = add_prefix_space  # presumably self.add_prefix_space

    def _UpperCamelCase ( self , _A , _A=None ) -> Union[str, Any]:
        # Apparently wraps one or two sequences with BOS/EOS special tokens.
        SCREAMING_SNAKE_CASE_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _UpperCamelCase ( self , _A , _A = None ) -> List[int]:
        # Apparently returns all-zero token-type ids sized for the full
        # special-token layout (BlenderbotSmall is a single-segment model).
        SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 299
|
import math
import random
def A__ ( value , deriv = False ):
    """Logistic sigmoid, or its derivative when ``deriv`` is true.

    When ``deriv`` is True, ``value`` is assumed to already be a sigmoid
    *output*, so the derivative is ``value * (1 - value)``.

    Fixes the obfuscated original, whose two parameters were both named
    ``__lowerCamelCase`` (a SyntaxError) while the body read ``value`` and
    ``deriv``.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
__UpperCAmelCase = 0.02
def A__ ( expected , number_propagations ):
    """Train a single sigmoid unit so its scaled output approaches ``expected``.

    Runs ``number_propagations`` rounds of forward propagation and weight
    updates, then returns the final output scaled to the 0-100 range.  The
    starting weight is random, so results vary between runs.

    Fixes the obfuscated original, whose two parameters were both named
    ``__lowerCamelCase`` (a SyntaxError) and whose locals were assigned to the
    ``SCREAMING_SNAKE_CASE_`` placeholder while being read as ``layer_a``,
    ``layer_1_error`` and ``layer_1_delta``.

    NOTE(review): this calls ``sigmoid_function``, which is not defined under
    that name in this file (the sigmoid above is named ``A__``) — confirm the
    intended binding before running.
    """
    # Random starting weight, an odd integer in [1, 199].
    weight = float(2 * (random.randint(1, 1_00)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_a
        # Error delta (derivative evaluated at the sigmoid output)
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 1_00
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): obfuscation artefact — the inputs are bound to the
    # placeholder ``__UpperCAmelCase`` but read back as ``expected`` /
    # ``number_propagations``, and ``forward_propagation`` is not defined
    # under that name in this file (the trainer above is named ``A__``), so
    # this raises NameError as written.
    __UpperCAmelCase = int(input("Expected value: "))
    __UpperCAmelCase = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
# Map each EfficientNet variant suffix to its Keras constructor.
# NOTE(review): machine-obfuscated — every entry points at
# ``efficientnet.EfficientNetBa``, which does not exist in
# keras.applications; these were presumably EfficientNetB0 ... EfficientNetB7.
# The dict is also bound to the placeholder ``__UpperCAmelCase`` while the
# conversion code below reads it as ``model_classes``.
__UpperCAmelCase = {
    "b0": efficientnet.EfficientNetBa,
    "b1": efficientnet.EfficientNetBa,
    "b2": efficientnet.EfficientNetBa,
    "b3": efficientnet.EfficientNetBa,
    "b4": efficientnet.EfficientNetBa,
    "b5": efficientnet.EfficientNetBa,
    "b6": efficientnet.EfficientNetBa,
    "b7": efficientnet.EfficientNetBa,
}
__UpperCAmelCase = {
"b0": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_24,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_40,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 14_08,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_60,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 15_36,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_00,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 17_92,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_80,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 20_48,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_56,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 23_04,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_28,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 25_60,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_00,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def A__ ( model_name ):
    """Build an ``EfficientNetConfig`` for ``model_name`` (one of b0-b7).

    Pulls architecture hyper-parameters from ``CONFIG_MAP`` and the
    ImageNet-1k label mapping from the ``huggingface/label-files`` dataset
    repo.

    Restores the obfuscated original, in which every assignment targeted the
    ``SCREAMING_SNAKE_CASE_`` placeholder instead of the config attributes
    and locals (NameError as written); attribute names follow the upstream
    conversion script — confirm before relying on them.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 10_00
    # Fetch and normalise the id->label mapping (JSON keys arrive as strings).
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def A__ ( ):
    """Download the standard COCO cats test image used for sanity checks.

    Restores the obfuscated original, which assigned the URL and image to the
    ``SCREAMING_SNAKE_CASE_`` placeholder while reading ``im`` and referenced
    the undefined ``__lowerCamelCase`` in a no-argument function.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
def A__ ( model_name ):
    """Create an ``EfficientNetImageProcessor`` sized for ``model_name``.

    Restores the obfuscated original (placeholder assignments, parameter name
    collapsed to ``__lowerCamelCase`` while the body read ``size``).

    NOTE(review): ``do_center_crop`` was obfuscated to a placeholder; the
    upstream conversion script passes ``False`` — confirm before relying on it.
    """
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size}, image_mean=[0.4_85, 0.4_56, 0.4_06], image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63], do_center_crop=False, )
    return preprocessor
def A__ ( original_param_names ):
    """Build the TF->HF parameter-name mapping for an EfficientNet checkpoint.

    Given the list of TF variable names, returns a dict mapping each present
    TF name to its ``efficientnet.``-prefixed HF state-dict key, plus the
    classifier head entries which always map outside the backbone prefix.

    Restores the obfuscated original, in which the working list and the
    ``key_mapping`` dict (including its subscript assignments) were collapsed
    onto the ``SCREAMING_SNAKE_CASE_`` placeholder (NameError as written).
    """
    # TF blocks are named block1a, block2b, ...; collect the unique suffixes.
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    # Map each TF block suffix ("1a", "2b", ...) to a sequential HF index.
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
    rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
    rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
    rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
    rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
        rename_keys.append(
            (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
        rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
        rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
        rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
        rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
        rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
        rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
        rename_keys.append(
            (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
        rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
        rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
    rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
    rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
    rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
    rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
    rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    # The classification head lives outside the ``efficientnet.`` prefix.
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def A__ ( hf_params , tf_params , key_mapping ):
    """Copy TF checkpoint tensors into the HF state dict in place.

    Conv kernels are permuted from TF's HWIO layout to PyTorch's OIHW,
    depthwise kernels to HWIO->IOHW order, and dense kernels transposed;
    everything else is copied verbatim.  Keys containing ``normalization``
    are skipped (no HF counterpart).

    Restores the obfuscated original, in which ``hf_key`` and
    ``new_hf_value`` were assigned to the ``SCREAMING_SNAKE_CASE_``
    placeholder and the tensor value was replaced by an undefined name.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3, 2, 0, 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2, 3, 0, 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    """Convert a Keras EfficientNet checkpoint to a HF PyTorch model, verify
    that the two models' logits match on a test image, and optionally save
    and/or push the converted model.

    NOTE(review): machine-obfuscated — the four parameters share one name (a
    SyntaxError in real Python; presumably model_name,
    pytorch_dump_folder_path, save_model, push_to_hub), and every
    ``SCREAMING_SNAKE_CASE_`` assignment was a distinct local originally.
    The comments below describe apparent intent only.
    """
    # Instantiate the original Keras model with pretrained ImageNet weights.
    SCREAMING_SNAKE_CASE_ = model_classes[model_name](
        include_top=__lowerCamelCase, weights='''imagenet''', input_tensor=__lowerCamelCase, input_shape=__lowerCamelCase, pooling=__lowerCamelCase, classes=10_00, classifier_activation='''softmax''', )
    # Snapshot all TF variables (trainable and non-trainable) as numpy arrays.
    SCREAMING_SNAKE_CASE_ = original_model.trainable_variables
    SCREAMING_SNAKE_CASE_ = original_model.non_trainable_variables
    SCREAMING_SNAKE_CASE_ = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        SCREAMING_SNAKE_CASE_ = param.numpy()
    SCREAMING_SNAKE_CASE_ = list(tf_params.keys() )
    # Load HuggingFace model
    SCREAMING_SNAKE_CASE_ = get_efficientnet_config(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = EfficientNetForImageClassification(__lowerCamelCase ).eval()
    SCREAMING_SNAKE_CASE_ = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    SCREAMING_SNAKE_CASE_ = rename_keys(__lowerCamelCase )
    replace_params(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
    # Initialize preprocessor and preprocess input image
    SCREAMING_SNAKE_CASE_ = convert_image_processor(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = preprocessor(images=prepare_img(), return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        SCREAMING_SNAKE_CASE_ = hf_model(**__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = outputs.logits.detach().numpy()
    # Original model inference
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = CONFIG_MAP[model_name]['''image_size''']
    SCREAMING_SNAKE_CASE_ = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
    SCREAMING_SNAKE_CASE_ = image.img_to_array(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = np.expand_dims(__lowerCamelCase, axis=0 )
    SCREAMING_SNAKE_CASE_ = original_model.predict(__lowerCamelCase )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(__lowerCamelCase, __lowerCamelCase, atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(__lowerCamelCase ):
            os.mkdir(__lowerCamelCase )
        # Save converted model and image processor
        hf_model.save_pretrained(__lowerCamelCase )
        preprocessor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        SCREAMING_SNAKE_CASE_ = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(__lowerCamelCase )
        hf_model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
    # Restores the obfuscated driver, which bound the parser and the parsed
    # namespace to the placeholder ``__UpperCAmelCase`` while reading them
    # back as ``parser`` and ``args`` (NameError as written).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
    # NOTE(review): the conversion entry point above is defined as ``A__`` —
    # confirm the intended binding of ``convert_efficientnet_checkpoint``.
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 299
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a one-time deprecation warning at import, steering users toward
# diffusers.pipelines.pipeline_utils before this shim module is removed in
# release 0.22.0; stacklevel=3 attributes the warning to the importing code.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 299
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP-style audio feature extractor: raw waveforms -> (fused) log-mel
    spectrogram ``input_features`` plus per-clip ``is_longer`` flags.

    NOTE(review): this block appears machine-obfuscated — most method
    signatures declare the parameter ``_A`` multiple times (a SyntaxError in
    real Python), and the repeated ``SCREAMING_SNAKE_CASE_ = ...`` lines were
    clearly distinct bindings (mostly ``self.<attr> = ...``) originally.  The
    comments below describe apparent intent only; confirm against the
    upstream implementation before relying on this code.
    """

    # Names of the model inputs produced by __call__.
    UpperCAmelCase_ =["input_features", "is_longer"]

    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        # Presumably (in some order): feature_size, sampling_rate, hop_length,
        # max_length_s, fft_window_size, padding_value, return_attention_mask,
        # frequency_min, frequency_max, top_db, truncation, padding — TODO confirm.
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        # The following were presumably ``self.top_db = top_db`` etc.
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a real-input FFT of size fft_window_size.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks: HTK-scaled (used on the "fusion" path) and
        # Slaney-scaled (used on the "rand_trunc" path).
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )

    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        """Serialise the extractor's state, dropping the large filter banks."""
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, returned as (frames, mels)."""
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        """Build the 4-channel "fusion" mel: a bilinearly shrunk global view
        stacked with random crops from the front/middle/back thirds."""
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        # Shrink the full-length spectrogram down to one chunk's size.
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        """Truncate or pad one waveform and convert it to mel features.

        Apparently returns ``(input_mel, longer)``, where ``longer`` records
        whether the clip exceeded ``max_length`` (consumed by the fusion path).
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        """Featurise a waveform or batch of waveforms; apparently returns a
        ``BatchFeature`` with ``input_features`` and ``is_longer``."""
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Normalise the input into a list of float32 mono waveforms.
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note-token sequences for the spectrogram diffusion
    pipeline: token + (frozen) position embeddings -> dropout -> stack of T5
    encoder blocks -> final layer norm -> dropout.

    NOTE(review): the original block listed an undefined placeholder as every
    base class, declared ten parameters all named ``_A`` (a SyntaxError), and
    assigned every sub-module to a throwaway local instead of ``self``, so no
    module was ever registered. This restores the conventional layout.
    """

    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ) -> None:
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        # Learned position table; frozen so positions stay fixed during training.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        # Reuse the T5 encoder block implementation from transformers.
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def _UpperCamelCase(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode a batch of token ids.

        Returns ``(hidden_states, encoder_inputs_mask)``; the mask is passed
        through unchanged for downstream consumers.
        """
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask (broadcastable additive mask for T5 blocks)
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 299
| 1
|
def A__(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    """Return the Hubble parameter H(z) for an FLRW universe.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + Ol), where the
    curvature density Ok is inferred from the other relative densities.

    NOTE(review): the original signature declared five parameters all named
    ``__lowerCamelCase`` (a SyntaxError) and dropped the local names used in
    the body; this restores them. ``hubble_constant`` is deliberately left out
    of the validation list, matching the original check set.

    :param hubble_constant: H0, the present-day Hubble constant.
    :param radiation_density: relative radiation density (0..1).
    :param matter_density: relative matter density (0..1).
    :param dark_energy: relative dark-energy density (0..1).
    :param redshift: redshift z (>= 0).
    :raises ValueError: on negative inputs or relative densities above one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('''All input parameters must be positive''')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('''Relative densities cannot be greater than one''')

    # Curvature density closes the total to 1.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_a = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_a ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation (original called an undefined `hubble_parameter`;
    # the function in this file is named A__)
    matter_density = 0.3

    print(
        A__(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 299
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__(ProcessorMixin):
    """Wav2Vec2 processor: wraps an audio feature extractor and a CTC tokenizer
    behind a single object.

    NOTE(review): the original block assigned both class attributes to the same
    name, dropped every ``self.`` on instance fields, named every method
    ``_UpperCamelCase`` (so only the last survived) and declared duplicate
    ``_A`` parameters; this restores the conventional processor layout.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Plain calls go to the feature extractor unless we are inside the
        # (deprecated) target context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            # Legacy configs without a `tokenizer_class`: fall back to the
            # concrete Wav2Vec2 classes and warn.
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''',
                FutureWarning,
            )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and `text` to the tokenizer;
        when both are given, attach the token ids as `labels`."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''')
            audio = kwargs.pop('''raw_speech''')
        else:
            audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` via the feature extractor and `labels` via the
        tokenizer; when both are given, attach padded labels to the features."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('''input_features''', None)
        labels = kwargs.pop('''labels''', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels['''input_ids''']
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route plain calls to the tokenizer."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.'''
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 299
| 1
|
def A__(length=50):
    """Project Euler 116: count tilings of a row of ``length`` grey squares
    using red (2), green (3) or blue (4) oblong tiles, never mixing colours,
    with at least one coloured tile placed.

    NOTE(review): the original assigned the DP table to a throwaway name and
    then read ``different_colour_ways_number`` (a NameError); restored here.

    :param length: row length (default 50, the Project Euler input).
    :return: total number of single-colour tilings.
    """
    # ways[n][t]: tilings of a row of length n using only tiles of size t + 2,
    # with at least one tile placed.
    ways = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2]
                    + 1
                )

    return sum(ways[length])


if __name__ == "__main__":
    # Original called an undefined `solution`; the function here is named A__.
    print(F"""{A__() = }""")
| 299
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__(test_results):
    """Parse a pytest summary line such as ``"1 failed, 2 passed in 6.2s"``.

    NOTE(review): the original initialized two placeholders instead of
    ``failed``/``success``/``time_spent`` and then read those names (NameError);
    restored here.

    :param test_results: the pytest result summary string.
    :return: ``(failed, success, time_spent)`` with counts as ints.
    """
    expressions = test_results.split(''' ''')

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def A__(failures_short_lines):
    """Map each failing doctest (from pytest's failures-short report) to the
    first line of its error message.

    A header line matching ``_ [doctest]`` names the failing test; the next
    non-line-number line is recorded as its failure message.

    NOTE(review): the original assigned the captured test name and the dict
    entry to a throwaway placeholder, so the returned dict was always empty;
    restored here.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('''\n'''):
        if re.search(r'''_ \[doctest\]''', line):
            in_error = True
            file = line.split(''' ''')[2]
        elif in_error and not line.split(''' ''')[0].isdigit():
            # First non-numeric line after the header is the error summary.
            failures[file] = line
            in_error = False

    return failures
class UpperCamelCase__:
    """Builds and posts the Slack report for the daily doc-test CI run.

    NOTE(review): throughout this class the original assignment targets were
    replaced by the single placeholder ``SCREAMING_SNAKE_CASE_`` (e.g. the
    ``__init__`` fields are never stored on ``self``) and several signatures
    declare duplicate ``_A`` parameters (a SyntaxError). The intended targets
    must be restored before this class can run; code is left byte-identical
    here and only annotated.
    """

    # NOTE(review): duplicate `_A` parameters; presumably (title, doc_test_results).
    def __init__( self , _A , _A ) -> Dict:
        SCREAMING_SNAKE_CASE_ = title
        SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
        SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
        SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
        SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE_ = doc_test_results

    # `time` property: total wall time formatted as "XhYmZs".
    @property
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = [self._time_spent]
        SCREAMING_SNAKE_CASE_ = 0
        for time in time_spent:
            SCREAMING_SNAKE_CASE_ = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_A ) == 1:
                SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''

    # `header` Slack block.
    @property
    def _UpperCamelCase ( self ) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    # `no_failures` Slack block: all tests passed.
    @property
    def _UpperCamelCase ( self ) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    # `failures` Slack block: headline failure counts.
    @property
    def _UpperCamelCase ( self ) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    F''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHub_RUN_ID" if False else "GITHUB_RUN_ID"]}''' if False else F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }

    # `category_failures` Slack block: per-category failed-test lists.
    @property
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = 40
        SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
        SCREAMING_SNAKE_CASE_ = ''''''
        for category, failures in category_failures.items():
            if len(_A ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_A )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": F'''The following examples had failures:\n\n\n{report}\n''',
            },
        }

    # `payload` property: JSON-encoded list of the blocks above.
    @property
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_A )

    # `error_out`: post a generic "tests did not run" message.
    @staticmethod
    def _UpperCamelCase ( ) -> Any:
        SCREAMING_SNAKE_CASE_ = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_A )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )

    # `post`: send the main report message to Slack.
    def _UpperCamelCase ( self ) -> Optional[int]:
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
        SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )

    # `get_reply_blocks`: build the per-job reply blocks.
    # NOTE(review): duplicate `_A` parameters; presumably (job_name, job_link, failures, text).
    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = ''''''
        for key, value in failures.items():
            SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        SCREAMING_SNAKE_CASE_ = job_name
        SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            SCREAMING_SNAKE_CASE_ = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    # `post_reply`: thread one reply per failing job under the main post.
    def _UpperCamelCase ( self ) -> int:
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                SCREAMING_SNAKE_CASE_ = job_result['''failures''']
                SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
                # throttle Slack API calls
                time.sleep(1 )
def A__():
    """Return ``{job name: html_url}`` for all jobs of the current GitHub
    Actions run, paging through the jobs API 100 at a time.

    NOTE(review): the original lost the ``run_id``/``url``/``result``/``jobs``
    assignment targets and printed an undefined name in the error branch;
    restored here. On any error an empty dict is returned (best-effort).
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']})
        # First page already fetched; compute how many extra pages remain.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''').json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']})

        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''', e)

    return {}
def A__(artifact_path):
    """Read every file in the directory ``artifact_path`` into a dict keyed by
    the file name without its extension.

    NOTE(review): the original never stored the file contents into the result
    dict and returned an undefined name; restored here.

    :param artifact_path: directory to read; missing directories yield ``{}``.
    :raises ValueError: when a file is not valid UTF-8.
    """
    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file), encoding='''utf-8''') as f:
                    # Key on the stem: "stats.txt" -> "stats".
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(artifact_path, file )}.''') from e

    return _artifact
def A__():
    """Scan the current working directory and return ``{dir name: Artifact}``
    for every sub-directory (each CI artifact is unpacked into its own dir).

    NOTE(review): the original inner class never stored its fields on ``self``
    and referenced an undefined ``Artifact`` name; restored here.
    """

    class Artifact:
        """A named artifact and the paths it was found at."""

        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({'''name''': self.name, '''path''': path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): every module-level target below was collapsed to the single
    # placeholder `__UpperCAmelCase`, while later lines read the intended names
    # (docs, github_actions_job_links, available_artifacts, artifact, failed,
    # success, time_spent, all_failures, file_path, category, test, failure,
    # message). Those targets — and the helper names `get_job_links`,
    # `retrieve_available_artifacts`, `retrieve_artifact`,
    # `handle_test_results`, `extract_first_line_failure`, `Message` (all
    # defined above as `A__`/`UpperCamelCase__`) — must be restored before this
    # script can run. Left byte-identical and only annotated.
    __UpperCAmelCase = get_job_links()
    __UpperCAmelCase = retrieve_available_artifacts()

    # Maps a filename glob to the report category it belongs to.
    __UpperCAmelCase = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __UpperCAmelCase = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    __UpperCAmelCase = github_actions_job_links.get("run_doctests")

    __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    __UpperCAmelCase = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
        __UpperCAmelCase = failed
        __UpperCAmelCase = success
        __UpperCAmelCase = time_spent[1:-1] + ", "

        __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                __UpperCAmelCase = line.replace("FAILED ", "")
                __UpperCAmelCase = line.split()[0].replace("\n", "")

                if "::" in line:
                    __UpperCAmelCase , __UpperCAmelCase = line.split("::")
                else:
                    __UpperCAmelCase , __UpperCAmelCase = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
                        __UpperCAmelCase = failure
                        break

    __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
| 1
|
def A__(numerator=3, denominator=7, limit=1_00_00_00):
    """Project Euler 71: find the numerator of the fraction immediately to the
    left of ``numerator/denominator`` among reduced proper fractions with a
    denominator of at most ``limit``.

    NOTE(review): the original declared three parameters all named
    ``__lowerCamelCase`` (a SyntaxError) and lost the max-tracking locals;
    restored here.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator/current_denominator <= target.
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # Exactly equal to the target fraction; step strictly below it.
            current_numerator -= 1

        # Compare current/current_denominator > max/max_denominator by
        # cross-multiplication to stay in exact integer arithmetic.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator

    return max_numerator


if __name__ == "__main__":
    # Original called an undefined `solution`; the function here is named A__.
    print(A__(numerator=3, denominator=7, limit=1_00_00_00))
| 299
|
from __future__ import annotations
# Candidate moves, applied as (row, col) deltas.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def A__(grid, init, goal, cost, heuristic):
    """Best-first (A*-style) grid search from ``init`` to ``goal``.

    NOTE(review): the original declared five parameters all named
    ``__lowerCamelCase`` (a SyntaxError), referenced ``DIRECTIONS`` while the
    constant had been renamed to a placeholder, and lost every local
    assignment target; restored here.

    :param grid: 2-D list; 0 = free cell, 1 = obstacle.
    :param init: start cell ``[row, col]``.
    :param goal: goal cell ``[row, col]``.
    :param cost: uniform cost per move.
    :param heuristic: 2-D list of per-cell heuristic values.
    :return: ``(path, action)`` — path as a list of ``[row, col]`` cells from
        init to goal, and the per-cell action map used to reconstruct it.
    :raises ValueError: when no path exists.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('''Algorithm is unable to find solution''')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk the action map backwards from the goal to recover the path.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = A__(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 1
|
import functools


def A__(days, costs):
    """Minimum total cost to travel on every day in ``days`` given the prices
    of 1-day, 7-day and 30-day passes (``costs = [c1, c7, c30]``).

    NOTE(review): the original declared two parameters both named
    ``__lowerCamelCase`` (a SyntaxError), lost the isinstance targets and the
    ``days_set`` name; restored here.

    :raises ValueError: on invalid ``days``/``costs`` inputs.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('''The parameter days should be a list of integers''')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('''All days elements should be greater than 0''')

    if max(days) >= 3_66:
        raise ValueError('''All days elements should be less than 366''')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` to year end.
        if index > 3_65:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def solve(matrix, vector):
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial
    pivoting; returns the solution as a column vector rounded to 10 places.

    NOTE(review): all four definitions in this script were obfuscated to the
    single name ``A__`` (each shadowing the previous) with their locals and
    cross-references broken; the original names solve / interpolate /
    question_function / solution are restored so the calls resolve again.
    """
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute entry in this column
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points):
    """Return the polynomial of minimal degree passing exactly through the
    points (1, y_points[0]), (2, y_points[1]), … as a callable."""
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Coefficients were rounded in solve(); round again to stay integral.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The Project Euler 101 generating polynomial u(n) = 1 - n + n^2 - … + n^10."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func=question_function, order=10):
    """Project Euler 101: sum of the first incorrect terms (FITs) of the
    optimum polynomials fitted to the first 1..order terms of ``func``."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        # First x where the fit diverges from the true sequence is the FIT.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 299
| 1
|
# Lazy-import module initializer for RoCBERT.
# NOTE(review): the obfuscation collapsed several distinct module-level targets
# to `__UpperCAmelCase`: the first dict is later read as `_import_structure`
# (L_LazyModule call) and the final assignment was presumably
# `sys.modules[__name__] = _LazyModule(...)`. Also, the tokenizers block under
# TYPE_CHECKING *raises* OptionalDependencyNotAvailable in its `else` branch
# (i.e. when tokenizers IS available), which looks inverted — TODO confirm
# against the upstream transformers roc_bert __init__. Code left byte-identical.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

__UpperCAmelCase = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): no fast tokenizer is registered here — intentional?
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): presumably `_import_structure["modeling_roc_bert"] = [...]`.
    __UpperCAmelCase = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): raising when the dependency IS available looks wrong.
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    # NOTE(review): presumably `sys.modules[__name__] = _LazyModule(...)`.
    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
    # Configure the summarization fine-tuning module.
    # NOTE(review): the signature declares `_A` twice (positional and **kwargs
    # — a SyntaxError), and every field below is assigned to the placeholder
    # `SCREAMING_SNAKE_CASE_` instead of the `self.` attribute read later
    # (metrics_save_path, hparams_save_path, step_count, metrics, model_type,
    # vocab_size, dataset_kwargs, n_obs, target_lens, num_workers,
    # decoder_start_token_id, dataset_class, already_saved_batch, eval_beams,
    # eval_max_length, val_metric). Those targets must be restored; code left
    # byte-identical and only annotated.
    def __init__( self , _A , **_A ) -> Tuple:
        if hparams.sortish_sampler and hparams.gpus > 1:
            SCREAMING_SNAKE_CASE_ = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )

        super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
        use_task_specific_params(self.model , '''summarization''' )
        save_git_info(self.hparams.output_dir )
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
        SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams , self.hparams_save_path )
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = defaultdict(_A )
        SCREAMING_SNAKE_CASE_ = self.config.model_type
        SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        SCREAMING_SNAKE_CASE_ = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        SCREAMING_SNAKE_CASE_ = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )

        SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
        SCREAMING_SNAKE_CASE_ = hparams.num_workers
        SCREAMING_SNAKE_CASE_ = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
            SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
        SCREAMING_SNAKE_CASE_ = (
            SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
        else:
            SCREAMING_SNAKE_CASE_ = self.model.config.max_length
        SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    # Dump one batch to JSON (decoded text + raw token ids) for debugging.
    # NOTE(review): the decoded dict and the `already_saved_batch = True` flag
    # are assigned to a placeholder, so `readable_batch` is undefined at the
    # return; the intended targets must be restored. Left byte-identical.
    def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
        SCREAMING_SNAKE_CASE_ = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )

        SCREAMING_SNAKE_CASE_ = True
        return readable_batch
def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
return self.model(_A , **_A )
def _UpperCamelCase ( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
return lmap(str.strip , _A )
    def _UpperCamelCase ( self , _A ) -> Tuple:
        """Compute the training loss for one batch (cross-entropy or label-smoothed NLL).

        Returns a one-element tuple ``(loss,)`` so it can be zipped with ``self.loss_names``.

        NOTE(review): the body reads ``batch``, ``tgt_ids``, ``decoder_input_ids``,
        ``outputs``, ``lm_logits`` and ``loss`` while every assignment targets
        ``SCREAMING_SNAKE_CASE_`` — the names never match, so this raises NameError
        as written (obfuscation artifact; confirm against the original finetune script).
        """
        SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
        SCREAMING_SNAKE_CASE_ = batch['''labels''']
        # T5-style models shift labels right internally; BART-style models use the helper.
        if isinstance(self.model , _A ):
            SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
        else:
            SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
        if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
            SCREAMING_SNAKE_CASE_ = decoder_input_ids
            self.save_readable_batch(_A )
        SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
        SCREAMING_SNAKE_CASE_ = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
            assert lm_logits.shape[-1] == self.vocab_size
            SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            # Label-smoothing path: work in log-probability space.
            SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
                _A , _A , self.hparams.label_smoothing , ignore_index=_A )
        return (loss,)
    @property
    def _UpperCamelCase ( self ) -> int:
        """Padding token id from the underlying tokenizer (used for masking and stats)."""
        return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
    def _UpperCamelCase ( self , _A ) -> Optional[int]:
        """Aggregate test outputs by reusing the validation aggregation with a ``test`` prefix."""
        return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
    def _UpperCamelCase ( self ) -> DataLoader:
        """Validation dataloader (no shuffling)."""
        return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
    def _UpperCamelCase ( self ) -> DataLoader:
        """Test dataloader (no shuffling)."""
        return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( _A , _A ) -> Dict:
BaseTransformer.add_model_specific_args(_A , _A )
add_generic_args(_A , _A )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Seq2seq fine-tuning module specialised for translation; validates with BLEU.

    NOTE(review): restored from an obfuscated version whose four class attributes
    all clobbered one name, whose ``__init__`` declared duplicate ``_A`` parameters
    (a SyntaxError), and whose language kwargs were assigned to throwaway locals.
    The attribute names (``loss_names``, ``metric_names``, ``default_val_metric``)
    are recovered from their uses in the base class above.
    """

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__( self , hparams , **kwargs ) -> Optional[int]:
        super().__init__(hparams , **kwargs )
        # Datasets need the language pair to configure source/target tokenization.
        self.dataset_kwargs['''src_lang'''] = hparams.src_lang
        self.dataset_kwargs['''tgt_lang'''] = hparams.tgt_lang

    def _UpperCamelCase ( self , preds , target ) -> dict:
        """Compute BLEU between predictions and references."""
        return calculate_bleu(preds , target )
def A__ ( __lowerCamelCase, __lowerCamelCase=None ):
    """Train (and optionally test) a summarization/translation module from CLI args.

    NOTE(review): as written this is a SyntaxError — both parameters are named
    ``__lowerCamelCase`` — and the body reads ``args``, ``model``, ``dataset``,
    ``es_callback`` (via callbacks), ``trainer`` and ``checkpoints`` while its
    assignments target obfuscated throwaway names.  Confirm intended names
    (``args``, ``model``) against the original finetune script.
    """
    Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
    check_output_dir(__lowerCamelCase, expected_items=3 )
    # Pick the task-specific LightningModule unless a pre-built model was supplied.
    if model is None:
        if "summarization" in args.task:
            SCREAMING_SNAKE_CASE_ = SummarizationModule(__lowerCamelCase )
        else:
            SCREAMING_SNAKE_CASE_ = TranslationModule(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('''/tmp''' )
        or str(args.output_dir ).startswith('''/var''' )
    ):
        SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        SCREAMING_SNAKE_CASE_ = os.environ.get('''WANDB_PROJECT''', __lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=__lowerCamelCase )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
    # Early stopping is only enabled for a non-negative patience.
    if args.early_stopping_patience >= 0:
        SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
    else:
        SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = args.val_metric == '''loss'''
    SCREAMING_SNAKE_CASE_ = generic_train(
        __lowerCamelCase, __lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, __lowerCamelCase ), early_stopping_callback=__lowerCamelCase, logger=__lowerCamelCase, )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
    if not args.do_predict:
        return model
    SCREAMING_SNAKE_CASE_ = ''''''
    SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=__lowerCamelCase ) )
    # NOTE(review): the two identical assignments below suggest one was meant to set
    # the trainer's resume checkpoint — TODO confirm.
    if checkpoints:
        SCREAMING_SNAKE_CASE_ = checkpoints[-1]
        SCREAMING_SNAKE_CASE_ = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # CLI entry point: generic Lightning flags + model-specific flags, then train.
    # NOTE(review): every statement below rebinds the same obfuscated name, yet the
    # code reads ``parser``, ``args`` and calls ``main`` — presumably those were the
    # pre-obfuscation names (the training function above is currently ``A__``).
    __UpperCAmelCase = argparse.ArgumentParser()
    __UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
    __UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    __UpperCAmelCase = parser.parse_args()
    main(args)
| 299
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
# Restored bindings: the obfuscated original assigned the logger, the parameter-name
# mapping and the top-level-key list all to one name (``__UpperCAmelCase``), so later
# objects clobbered earlier ones, while the conversion functions in this module
# reference ``logger``, ``MAPPING`` and ``TOP_LEVEL_KEYS``.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF UniSpeechSat parameter-name fragment
# ("*" is replaced with the encoder layer index during loading).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
# Mapped keys that live at the top level of the HF model (no "unispeech_sat." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def A__ ( hf_pointer, key, value, full_name, weight_type ):
    """Walk ``hf_pointer`` along dotted ``key`` and copy ``value`` into the target tensor.

    Fixes the original, in which all five parameters shared one name (a SyntaxError)
    and every branch assigned ``value`` to a throwaway local instead of the model
    parameter's ``.data``.  Raises ValueError on a checkpoint/model shape mismatch.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def A__ ( fairseq_model, hf_model ):
    """Copy every weight from a fairseq UniSpeechSat model into the HF model.

    Fixes the original's duplicate parameter names (a SyntaxError) and restores the
    local bindings (``unused_weights``, ``is_used``, ``mapped_key``, ``weight_type``)
    that the body already read but the obfuscated assignments never bound.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys keep their name; everything else is nested under the model.
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def A__ ( full_name, value, feature_extractor, unused_weights, use_group_norm ):
    """Copy one convolutional feature-extractor weight into the HF model.

    Fixes the original's five identical parameter names (a SyntaxError), assigns into
    the actual model tensors instead of throwaway locals, and corrects the error
    messages that indexed ``feature_extractor`` directly (a TypeError at format time)
    instead of ``feature_extractor.conv_layers``.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        # Convolution weights/biases.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer-norm weights/biases (group norm only exists on the first layer).
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def A__ ( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ):
    """Convert a fairseq UniSpeechSat checkpoint to a HF ``save_pretrained`` directory.

    Fixes the original's five identical parameter names (a SyntaxError) and binds the
    ``config``/``hf_wavavec``/``model`` locals the body already read.  Runs without
    gradient tracking since we only copy weights.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> HF UniSpeechSat checkpoint conversion.
    # NOTE(review): the parser is bound to ``__UpperCAmelCase`` but the code reads
    # ``parser`` and ``args`` and calls ``convert_unispeech_sat_checkpoint`` (the
    # conversion function above is currently named ``A__``) — obfuscation artifact.
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    __UpperCAmelCase = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 299
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the LayoutLMv2 sub-package.  The obfuscated original
# bound every piece to one name (``__UpperCAmelCase``) so the conditional additions
# overwrote each other, and then passed the undefined name ``_import_structure`` to
# ``_LazyModule`` — restored here to the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
# Restored constant names: the obfuscated original rebound one name four times while
# the tokenizer class below references VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_INIT_CONFIGURATION and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 5_12}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    r"""Construct a "fast" MobileBERT tokenizer backed by HuggingFace's *tokenizers* library.

    NOTE(review): restored from a mechanically-obfuscated version in which the three
    public methods all shared the name ``_UpperCamelCase`` (later defs shadowed the
    earlier ones), the five class attributes clobbered one name, and duplicate ``_A``
    parameter names made every signature a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        """Initialise the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # If the serialized normalizer disagrees with the requested options, rebuild it.
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` token id lists."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Save the backend tokenizer model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 299
|
import functools
def A__ ( days, costs ):
    """Minimum total cost of 1-day / 7-day / 30-day passes covering all travel ``days``.

    Fixes the original, which declared both parameters as ``__lowerCamelCase`` (a
    SyntaxError), read the unbound names ``days``/``costs`` in its body, and called
    ``isinstance`` with nonsensical argument pairs.

    :param days: list of distinct travel days, each in 1..365
    :param costs: three integers [1-day pass, 7-day pass, 30-day pass]
    :raises ValueError: on malformed ``days``/``costs`` or out-of-range day values
    """
    # Validation
    if not isinstance(days, list ) or not all(isinstance(day, int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost, int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        # Cheapest cost to cover all travel days from `index` to the end of the year.
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ), costs[1] + dynamic_programming(index + 7 ), costs[2] + dynamic_programming(index + 30 ), )

    return dynamic_programming(1 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 299
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# This conversion script relies on fairseq >= 1.0.0a APIs (model.cfg access).
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
# NOTE(review): the next two statements bind to the same name, so the logger is
# immediately clobbered by the sample text — presumably ``logger`` and
# ``SAMPLE_TEXT`` before mechanical renaming; confirm against the original script.
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "Hello world! cécé herlolip"
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    """Port a fairseq XLM-RoBERTa-XL checkpoint into an HF model and verify outputs.

    NOTE(review): as written this is a SyntaxError — all three parameters share the
    name ``__lowerCamelCase`` (presumably ``roberta_checkpoint_path``,
    ``pytorch_dump_folder_path``, ``classification_head``).  Additionally, the
    body reads names like ``roberta``, ``roberta_sent_encoder``, ``config``,
    ``model``, ``layer``, ``roberta_layer`` and assigns many HF parameter targets
    to throwaway locals — the weight-copy statements no longer write into the HF
    model; confirm each against the original conversion script before use.
    """
    SCREAMING_SNAKE_CASE_ = FairseqRobertaModel.from_pretrained(__lowerCamelCase )
    roberta.eval() # disable dropout
    SCREAMING_SNAKE_CASE_ = roberta.model.encoder.sentence_encoder
    # Build an HF config mirroring the fairseq architecture hyper-parameters.
    SCREAMING_SNAKE_CASE_ = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_14, type_vocab_size=1, layer_norm_eps=1E-5, )
    if classification_head:
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''', __lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = XLMRobertaXLForSequenceClassification(__lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(__lowerCamelCase )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.embed_tokens.weight
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.embed_positions.weight
    SCREAMING_SNAKE_CASE_ = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layer_norm.weight
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        SCREAMING_SNAKE_CASE_ = model.roberta.encoder.layer[i]
        SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layers[i]
        SCREAMING_SNAKE_CASE_ = layer.attention
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn_layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn_layer_norm.bias
        # self attention
        SCREAMING_SNAKE_CASE_ = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.q_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.q_proj.bias
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.k_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.k_proj.bias
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.v_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        SCREAMING_SNAKE_CASE_ = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.out_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        SCREAMING_SNAKE_CASE_ = roberta_layer.final_layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.final_layer_norm.bias
        # intermediate
        SCREAMING_SNAKE_CASE_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.bias
        # output
        SCREAMING_SNAKE_CASE_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''].dense.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''].dense.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''].out_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.dense.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.dense.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.layer_norm.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    SCREAMING_SNAKE_CASE_ = roberta.encode(__lowerCamelCase ).unsqueeze(0 ) # batch of size 1
    SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )[0]
    if classification_head:
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['''mnli'''](roberta.extract_features(__lowerCamelCase ) )
    else:
        SCREAMING_SNAKE_CASE_ = roberta.model(__lowerCamelCase )[0]
    print(our_output.shape, their_output.shape )
    SCREAMING_SNAKE_CASE_ = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    SCREAMING_SNAKE_CASE_ = torch.allclose(__lowerCamelCase, __lowerCamelCase, atol=1E-3 )
    print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    pathlib.Path(__lowerCamelCase ).mkdir(parents=__lowerCamelCase, exist_ok=__lowerCamelCase )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> HF XLM-RoBERTa-XL conversion.
    # NOTE(review): the parser is bound to ``__UpperCAmelCase`` but the code reads
    # ``parser``/``args`` and calls ``convert_xlm_roberta_xl_checkpoint_to_pytorch``
    # (the conversion function above is currently ``A__``) — obfuscation artifact.
    __UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    __UpperCAmelCase = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model tests for a small 3-channel ``UNet2DModel`` configuration.

    NOTE(review): every method below is named ``_UpperCamelCase``, so each
    definition shadows the previous one and the ``@property`` accessors are
    lost; likewise all locals are assigned to the single name
    ``SCREAMING_SNAKE_CASE_`` while later lines read the intended names
    (``batch_size``, ``noise``, ``self.dummy_input``, ``_A``), which are
    undefined here. Code kept byte-identical; confirm against the original
    diffusers test file before running.
    """

    UpperCAmelCase_ =UNetaDModel  # model class exercised by the tester mixins
    UpperCAmelCase_ ="sample"  # name of the model's main input

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Dummy (batch=4, channels=3, 32x32) noise tensor plus one timestep.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Minimal UNet2D init kwargs and the matching dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model tests for a 4-channel LDM-style ``UNet2DModel`` (no attention blocks).

    NOTE(review): as elsewhere in this module, all methods share the name
    ``_UpperCamelCase`` (later defs shadow earlier ones) and all locals are
    collapsed into ``SCREAMING_SNAKE_CASE_`` while later lines read the
    intended names (``loading_info``, ``model``, ``image``, ``noise``, ``_A``),
    which are undefined here. Code kept byte-identical; confirm against the
    original diffusers test file before running.
    """

    UpperCAmelCase_ =UNetaDModel  # model class exercised by the tester mixins
    UpperCAmelCase_ ="sample"  # name of the model's main input

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Dummy (batch=4, channels=4, 32x32) noise tensor plus one timestep.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected input shape (C, H, W).
        return (4, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Tuple:
        # Expected output shape (C, H, W).
        return (4, 32, 32)

    def _UpperCamelCase ( self ) -> Tuple:
        # Minimal 4-channel UNet2D init kwargs and matching dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    def _UpperCamelCase ( self ) -> Tuple:
        # Loading a tiny hub checkpoint should report no missing keys.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # Same load-and-forward smoke test, but executed on GPU.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def _UpperCamelCase ( self ) -> Dict:
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        # Checks accelerate-loaded and normally-loaded models agree numerically.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
        assert torch_all_close(_A , _A , rtol=1E-3 )

    def _UpperCamelCase ( self ) -> List[str]:
        # Regression test: output values pinned for a fixed seed.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        SCREAMING_SNAKE_CASE_ = noise.to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Model tests for an NCSN++-style ``UNet2DModel`` with Fourier time embedding.

    NOTE(review): as elsewhere in this module, all methods share the name
    ``_UpperCamelCase`` (later defs shadow earlier ones) and all locals are
    collapsed into ``SCREAMING_SNAKE_CASE_`` while later lines read the
    intended names (``batch_size``, ``noise``, ``output``, ``model``, ``_A``),
    which are undefined here. Code kept byte-identical; confirm against the
    original diffusers test file before running.
    """

    UpperCAmelCase_ =UNetaDModel  # model class exercised by the tester mixins
    UpperCAmelCase_ ="sample"  # name of the model's main input

    @property
    def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
        # Dummy (batch=4, channels=3, H, W) noise plus an int64 timestep batch.
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> List[str]:
        # Expected input shape (C, H, W).
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> List[Any]:
        # Expected output shape (C, H, W).
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> Optional[int]:
        # NCSN++/VE init kwargs (Fourier embedding, skip blocks) + dummy inputs.
        SCREAMING_SNAKE_CASE_ = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        # Loading the full CelebA-HQ checkpoint should report no missing keys.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = self.dummy_input
        SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
        SCREAMING_SNAKE_CASE_ = noise
        SCREAMING_SNAKE_CASE_ = model(**_A )
        assert image is not None, "Make sure output is not None"

    @slow
    def _UpperCamelCase ( self ) -> List[Any]:
        # Regression test on the 256x256 checkpoint with an all-ones input.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (256, 256)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> int:
        # Same regression test against the tiny FFHQ dummy checkpoint.
        SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = (32, 32)
        SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
        SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Dict:
        # not required for this model
        pass
| 299
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__UpperCAmelCase = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__UpperCAmelCase = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__UpperCAmelCase = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 
'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 
'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
# NOTE(review): ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION`` are not
# defined in this module (the module-level constants were all renamed to
# ``__UpperCAmelCase``) — confirm against the original metric module.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
    """Google BLEU (GLEU): corpus-level min(n-gram precision, n-gram recall).

    NOTE(review): both methods below share the name ``_UpperCamelCase``, so the
    second definition shadows the first (the metric-info accessor).
    """

    def _UpperCamelCase ( self ) -> MetricInfo:
        # Metric metadata: tokenized predictions and (possibly multiple) references.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
                } ) , )

    def _UpperCamelCase ( self , predictions , references , min_len = 1 , max_len = 4 ) -> Dict[str, float]:
        """Compute corpus GLEU for tokenized predictions against references.

        Fix: the original signature declared all four parameters with the same
        name ``_A``, which is a SyntaxError in Python; the names are taken from
        the metric's documented keyword arguments.
        """
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 299
|
def A__ ( __lowerCamelCase = 10_00 ):
    """Return the sum of the decimal digits of ``2 ** __lowerCamelCase``.

    Project Euler problem 16; the default exponent is 1000.

    Args:
        __lowerCamelCase: non-negative integer exponent.

    Returns:
        The digit sum as an ``int``.
    """
    # str() of the power yields the decimal digits directly; summing their int
    # values replaces the original manual divmod loop.
    return sum(int(digit) for digit in str(2**__lowerCamelCase))
if __name__ == "__main__":
    # Fix: ``solution`` is undefined in this module — the digit-sum function
    # above is named ``A__`` — so running the script raised NameError.
    print(A__(int(str(input()).strip())))
| 299
| 1
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def A__ ( accelerator, dataset, train_idxs, valid_idxs, batch_size = 16 ):
    """Build the train/validation/test dataloaders for one cross-validation fold.

    Fix: the original signature declared all five parameters with the same name
    ``__lowerCamelCase`` (a SyntaxError) and every local was assigned to
    ``SCREAMING_SNAKE_CASE_`` while later lines read the intended names; the
    names below are reconstructed from the upstream accelerate
    cross-validation example.

    Args:
        accelerator: the ``Accelerator`` coordinating (distributed) execution.
        dataset: raw GLUE/MRPC ``DatasetDict`` with ``train``/``validation``.
        train_idxs: row indices of ``dataset["train"]`` used for training.
        valid_idxs: row indices of ``dataset["train"]`` used for validation.
        batch_size: per-device batch size (default 16).

    Returns:
        ``(train_dataloader, eval_dataloader, test_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    # The held-out GLUE validation split acts as this fold's test set.
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs ),
            '''validation''': dataset['''train'''].select(valid_idxs ),
            '''test''': dataset['''validation'''],
        } )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def A__ ( config, args ):
    """Run k-fold cross-validated GLUE/MRPC fine-tuning with Accelerate.

    Fix: the original signature declared both parameters as ``__lowerCamelCase``
    (a SyntaxError) and collapsed every local into ``SCREAMING_SNAKE_CASE_``;
    names below are reconstructed from the upstream accelerate
    cross-validation example.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace (``cpu``, ``mixed_precision``, ``num_folds``).
    """
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset('''glue''', '''mrpc''' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''', '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    # NOTE(review): MAX_GPU_BATCH_SIZE is expected to be the module-level
    # constant (16); it is not defined under that name in this module — confirm.
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows ), datasets['''train''']['''label'''] )
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        # NOTE(review): must call the fold-dataloader builder defined above —
        # confirm its name in the restored module.
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''', eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions; references only once.
                test_references.append(references.cpu() )
        test_predictions.append(torch.cat(fold_predictions, dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0 )
    preds = torch.stack(test_predictions, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds, references=test_references )
    accelerator.print('''Average test metrics from all folds:''', test_metric )
def A__ ( ):
    """Parse command-line arguments and launch cross-validated training.

    Fix: the original assigned the parser and parsed namespace to
    ``SCREAMING_SNAKE_CASE_`` while the following lines read the undefined
    names ``parser``/``__lowerCamelCase``, and used ``__lowerCamelCase`` for
    argparse ``type``/``default`` values; reconstructed from the upstream
    accelerate example.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''', type=int, default=3, help='''The number of splits to perform across the dataset''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): ``training_function`` must be the trainer defined above —
    # confirm its name in the restored module.
    training_function(config, args )
if __name__ == "__main__":
    # Fix: ``main`` is undefined in this module; the CLI entry point defined
    # above is named ``A__``.
    A__()
| 299
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Import-time placeholder that raises unless ``torch`` and ``scipy`` exist.

    Fix: each original signature used ``*_A, **_A`` — the same name for both the
    positional and keyword catch-alls — which is a SyntaxError in Python.

    NOTE(review): the two classmethods below share the name ``_UpperCamelCase``,
    so the second shadows the first; confirm against the original dummy module.
    """

    UpperCAmelCase_ =["torch", "scipy"]  # backends this placeholder requires

    def __init__( self , *args , **kwargs ) -> Tuple:
        requires_backends(self , ['''torch''', '''scipy'''] )

    @classmethod
    def _UpperCamelCase ( cls , *args , **kwargs ) -> Any:
        requires_backends(cls , ['''torch''', '''scipy'''] )

    @classmethod
    def _UpperCamelCase ( cls , *args , **kwargs ) -> Tuple:
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Descriptor that caches the wrapped getter's result on the instance.

    Fix: the original ``__get__`` declared two parameters both named ``_A`` (a
    SyntaxError) and collapsed the distinct locals ``attr``/``cached`` into one
    name; reconstructed following the standard cached-property descriptor
    pattern (the body already read ``obj``, confirming the parameter name).
    """

    def __get__( self , obj , objtype=None ) -> Optional[int]:
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            # Accessed on the class, not an instance: return the descriptor.
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        # Cache slot name is derived from the wrapped getter's name.
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def A__ ( __lowerCamelCase ):
    """Convert a truthy/falsy string (e.g. ``"yes"``/``"off"``) to 1 or 0.

    Fix: the lowered value was assigned to ``SCREAMING_SNAKE_CASE_`` while the
    following lines read the undefined name ``val``, so every call raised
    NameError.

    Raises:
        ValueError: if the string is not a recognised truth value.
    """
    val = __lowerCamelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F'''invalid truth value {val!r}''' )
def A__ ( __lowerCamelCase ):
    """Return True if the argument is a torch/TF/JAX tensor, a torch.fx proxy,
    or a numpy array; each framework is imported only if it is available."""
    obj = __lowerCamelCase
    if is_torch_fx_proxy(obj ):
        return True
    if is_torch_available():
        import torch

        if isinstance(obj, torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(obj, tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(obj, (jnp.ndarray, Tracer) ):
            return True
    # Fall back to the numpy check, which needs no availability guard.
    return isinstance(obj, np.ndarray )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a ``numpy.ndarray`` (subclasses included)."""
    array_type = np.ndarray
    return isinstance(__lowerCamelCase, array_type )
def A__ ( __lowerCamelCase ):
    """Public numpy-array check.

    Fix: the body called ``_is_numpy``, which is not defined in this module
    (every helper here was renamed ``A__``), so the call raised NameError;
    the check is inlined instead.
    """
    return isinstance(__lowerCamelCase, np.ndarray )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a ``torch.Tensor``.

    Assumes torch is importable; callers guard with an availability check.
    """
    import torch

    tensor_type = torch.Tensor
    return isinstance(__lowerCamelCase, tensor_type )
def A__ ( __lowerCamelCase ):
    """Safe torch-tensor check that returns False when torch is unavailable.

    Fix: the original delegated to ``_is_torch``, which is not defined in this
    module, so the call raised NameError whenever torch was available; the
    check is inlined under the availability guard.
    """
    if not is_torch_available():
        return False
    import torch

    return isinstance(__lowerCamelCase, torch.Tensor )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a ``torch.device``.

    Assumes torch is importable; callers guard with an availability check.
    """
    import torch

    device_type = torch.device
    return isinstance(__lowerCamelCase, device_type )
def A__ ( __lowerCamelCase ):
    """Safe torch-device check that returns False when torch is unavailable.

    Fix: the original delegated to ``_is_torch_device``, which is not defined
    in this module, so the call raised NameError whenever torch was available;
    the check is inlined under the availability guard.
    """
    if not is_torch_available():
        return False
    import torch

    return isinstance(__lowerCamelCase, torch.device )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is (or names, as a string) a ``torch.dtype``.

    Fix: the original collapsed the distinct names of this check —
    ``isinstance(x, str)`` / ``hasattr(torch, x)`` — into the single parameter
    name, producing ``isinstance(x, x)``, which raises TypeError for any
    non-type argument. Reconstructed following the standard helper: a string
    like ``"float32"`` is resolved against the ``torch`` module first.
    """
    import torch

    if isinstance(__lowerCamelCase, str ):
        if hasattr(torch, __lowerCamelCase ):
            # Resolve the attribute and fall through to the dtype check below.
            __lowerCamelCase = getattr(torch, __lowerCamelCase )
        else:
            return False
    return isinstance(__lowerCamelCase, torch.dtype )
def A__ ( __lowerCamelCase ):
    """Safe ``torch.dtype`` check that returns False when torch is unavailable.

    Fix: the original delegated to ``_is_torch_dtype``, which is not defined in
    this module, so the call raised NameError whenever torch was available; the
    string-or-dtype check is inlined under the availability guard.
    """
    if not is_torch_available():
        return False
    import torch

    if isinstance(__lowerCamelCase, str ):
        if not hasattr(torch, __lowerCamelCase ):
            return False
        __lowerCamelCase = getattr(torch, __lowerCamelCase )
    return isinstance(__lowerCamelCase, torch.dtype )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a ``tensorflow.Tensor``.

    Assumes TensorFlow is importable; callers guard with an availability check.
    """
    import tensorflow as tf

    tf_tensor_type = tf.Tensor
    return isinstance(__lowerCamelCase, tf_tensor_type )
def A__ ( __lowerCamelCase ):
    """Safe TF-tensor check that returns False when TensorFlow is unavailable.

    Fix: the original delegated to ``_is_tensorflow``, which is not defined in
    this module, so the call raised NameError whenever TF was available; the
    check is inlined under the availability guard.
    """
    if not is_tf_available():
        return False
    import tensorflow as tf

    return isinstance(__lowerCamelCase, tf.Tensor )
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a symbolic (graph-mode) TF tensor.

    Fix: the original probed ``hasattr(x, "is_symbolic_tensor")`` on the
    *argument* instead of on the ``tf`` module, so the TF >= 2.14 fast path was
    selected by the wrong object; the feature probe belongs on the module (the
    predicate is only available starting with TF 2.14, as the original comment
    notes).
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(__lowerCamelCase )
    # Older TF: symbolic tensors are exactly instances of tf.Tensor.
    return type(__lowerCamelCase ) == tf.Tensor
def A__ ( __lowerCamelCase ):
    """Safe symbolic-TF-tensor check; returns False when TF is unavailable.

    Fix: the original delegated to ``_is_tf_symbolic_tensor``, which is not
    defined in this module, so the call raised NameError whenever TF was
    available; the check is inlined under the availability guard.
    """
    if not is_tf_available():
        return False
    import tensorflow as tf

    if hasattr(tf, '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(__lowerCamelCase )
    return type(__lowerCamelCase ) == tf.Tensor
def A__ ( __lowerCamelCase ):
    """Return whether the argument is a JAX ndarray.

    Assumes jax is importable; callers guard with an availability check.
    """
    import jax.numpy as jnp  # noqa: F811

    jax_array_type = jnp.ndarray
    return isinstance(__lowerCamelCase, jax_array_type )
def A__ ( __lowerCamelCase ):
    """Safe JAX-array check that returns False when flax/jax is unavailable.

    Fix: the original delegated to ``_is_jax``, which is not defined in this
    module, so the call raised NameError whenever flax was available; the
    check is inlined under the availability guard.
    """
    if not is_flax_available():
        return False
    import jax.numpy as jnp

    return isinstance(__lowerCamelCase, jnp.ndarray )
def A__ ( __lowerCamelCase ):
    """Recursively convert tensors/arrays/containers into plain Python objects.

    NOTE(review): this body calls ``to_py_obj``, ``is_tf_tensor``,
    ``is_torch_tensor`` and ``is_jax_tensor`` and reads ``obj`` — none of which
    are defined in this module (the helpers were all renamed ``A__`` and the
    parameter is ``__lowerCamelCase``) — so most branches raise NameError as
    written. Kept byte-identical; confirm against the original utils module.
    """
    if isinstance(__lowerCamelCase, (dict, UserDict) ):
        # Recurse into mapping values; keys pass through unchanged.
        return {k: to_py_obj(__lowerCamelCase ) for k, v in obj.items()}
    elif isinstance(__lowerCamelCase, (list, tuple) ):
        # Recurse into sequence elements.
        return [to_py_obj(__lowerCamelCase ) for o in obj]
    elif is_tf_tensor(__lowerCamelCase ):
        return obj.numpy().tolist()
    elif is_torch_tensor(__lowerCamelCase ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(__lowerCamelCase ):
        return np.asarray(__lowerCamelCase ).tolist()
    elif isinstance(__lowerCamelCase, (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        # Anything else is assumed to already be a plain Python object.
        return obj
def A__ ( __lowerCamelCase ):
    """Recursively convert framework tensors inside nested containers to NumPy.

    Dicts (incl. ``UserDict``) are traversed; lists/tuples become ``np.array``;
    TF / PyTorch / JAX tensors are converted to ndarrays; anything else is
    returned unchanged.
    """

    def _to_numpy(obj ):
        if isinstance(obj, (dict, UserDict) ):
            return {k: _to_numpy(v ) for k, v in obj.items()}
        if isinstance(obj, (list, tuple) ):
            return np.array(obj )
        if is_tf_tensor(obj ):
            return obj.numpy()
        if is_torch_tensor(obj ):
            return obj.detach().cpu().numpy()
        if is_jax_tensor(obj ):
            return np.asarray(obj )
        return obj

    return _to_numpy(__lowerCamelCase )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """ModelOutput-style container: an ordered mapping whose keys are the
    dataclass fields, which also supports tuple-style integer indexing via
    ``to_tuple`` and forbids the mutating dict APIs (``pop``/``update``/...)."""
    def _UpperCamelCase ( self ) -> int:
        # Post-init hook: populate the underlying mapping from the dataclass fields.
        # NOTE(review): several names read below (`_A`, `class_fields`, `first_field`,
        # `other_fields_are_none`, `first_field_iterator`, `v`) are not assigned in
        # this body — verify against the reference ModelOutput implementation.
        SCREAMING_SNAKE_CASE_ = fields(self )
        # Safety and consistency checks
        if not len(_A ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
        SCREAMING_SNAKE_CASE_ = getattr(self , class_fields[0].name )
        SCREAMING_SNAKE_CASE_ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        # When only the first field is set and it is not a tensor, it may be a
        # dict or a (key, value) iterator whose pairs become the output's entries.
        if other_fields_are_none and not is_tensor(_A ):
            if isinstance(_A , _A ):
                SCREAMING_SNAKE_CASE_ = first_field.items()
                SCREAMING_SNAKE_CASE_ = True
            else:
                try:
                    SCREAMING_SNAKE_CASE_ = iter(_A )
                    SCREAMING_SNAKE_CASE_ = True
                except TypeError:
                    SCREAMING_SNAKE_CASE_ = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(_A ):
                    if (
                        not isinstance(_A , (list, tuple) )
                        or not len(_A ) == 2
                        or not isinstance(element[0] , _A )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            SCREAMING_SNAKE_CASE_ = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        SCREAMING_SNAKE_CASE_ = element[1]
        elif first_field is not None:
            SCREAMING_SNAKE_CASE_ = first_field
        else:
            # General case: copy every non-None dataclass field into the mapping.
            for field in class_fields:
                SCREAMING_SNAKE_CASE_ = getattr(self , field.name )
                if v is not None:
                    SCREAMING_SNAKE_CASE_ = v
    # The four methods below deliberately disable dict mutation so the output's
    # field set stays fixed after construction.
    def __delitem__( self , *_A , **_A ) -> Any:
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
    def _UpperCamelCase ( self , *_A , **_A ) -> Optional[int]:
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
    def _UpperCamelCase ( self , *_A , **_A ) -> Tuple:
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
    def _UpperCamelCase ( self , *_A , **_A ) -> str:
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
    def __getitem__( self , _A ) -> Any:
        # String keys behave like a dict lookup; integer/slice keys like a tuple.
        # NOTE(review): `inner_dict`/`k` are read but not bound here — verify upstream.
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , _A , _A ) -> Union[str, Any]:
        # Keep attribute writes and dict entries in sync for known keys.
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(_A , _A )
        super().__setattr__(_A , _A )
    def __setitem__( self , _A , _A ) -> Dict:
        # Will raise a KeyException if needed
        super().__setitem__(_A , _A )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(_A , _A )
    def _UpperCamelCase ( self ) -> Tuple[Any]:
        # Tuple view over the stored values, in key order.
        return tuple(self[k] for k in self.keys() )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Enum base that raises a helpful error listing the valid values when an
    unknown value is looked up (ExplicitEnum-style ``_missing_`` hook)."""
    @classmethod
    def _UpperCamelCase ( cls , _A ) -> int:
        # NOTE(review): the message interpolates `value` while the parameter is
        # named `_A`, and `_valueamember_map_` looks like a mangled
        # `_value2member_map_` — confirm against the reference implementation.
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Padding strategies accepted by tokenizers (values match the ``padding`` kwarg)."""
    # NOTE(review): all three members share the name `UpperCAmelCase_`, so the
    # later assignments shadow the earlier ones — verify the intended member names
    # (LONGEST / MAX_LENGTH / DO_NOT_PAD).
    UpperCAmelCase_ ="longest"
    UpperCAmelCase_ ="max_length"
    UpperCAmelCase_ ="do_not_pad"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Identifiers for the tensor frameworks a ``return_tensors=...`` call can target."""
    # NOTE(review): all four members share the name `UpperCAmelCase_`, so the
    # later assignments shadow the earlier ones — verify the intended member names
    # (PYTORCH / TENSORFLOW / NUMPY / JAX).
    UpperCAmelCase_ ="pt"
    UpperCAmelCase_ ="tf"
    UpperCAmelCase_ ="np"
    UpperCAmelCase_ ="jax"
class UpperCamelCase__ :
    """Enter/exit a collection of context managers as one context manager.

    All managers in the given list are entered (in order) on ``__enter__`` and
    unwound in reverse order on ``__exit__`` via a ``contextlib.ExitStack``.
    """

    def __init__( self , context_managers ) -> None:
        # context_managers: iterable of context managers to manage together.
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ) -> None:
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ) -> None:
        # Delegate exception handling/unwinding to the ExitStack.
        self.stack.__exit__(*args , **kwargs )
def A__ ( __lowerCamelCase ):
    """Return True when a model class computes a loss by default.

    True iff the model's forward signature has a ``return_loss`` parameter
    whose default is ``True`` (e.g. CLIP-style models).
    """

    def _infer_framework(model_class ):
        # Classify by the module each base class in the MRO lives in.
        for base_class in inspect.getmro(model_class ):
            module = base_class.__module__
            name = base_class.__name__
            if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
                return "tf"
            elif module.startswith('''torch''' ) or name == "PreTrainedModel":
                return "pt"
            elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
                return "flax"
        raise TypeError(F'''Could not infer framework from class {model_class}.''' )

    framework = _infer_framework(__lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(__lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(__lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(__lowerCamelCase.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def A__ ( __lowerCamelCase ):
    """Return the label-argument names expected by a model class.

    For question-answering models this includes ``start_positions`` /
    ``end_positions``; otherwise every parameter whose name contains
    ``"label"``.
    """

    def _infer_framework(model_class ):
        # Classify by the module each base class in the MRO lives in.
        for base_class in inspect.getmro(model_class ):
            module = base_class.__module__
            name = base_class.__name__
            if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
                return "tf"
            elif module.startswith('''torch''' ) or name == "PreTrainedModel":
                return "pt"
            elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
                return "flax"
        raise TypeError(F'''Could not infer framework from class {model_class}.''' )

    model_name = __lowerCamelCase.__name__
    framework = _infer_framework(__lowerCamelCase )
    if framework == "tf":
        signature = inspect.signature(__lowerCamelCase.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(__lowerCamelCase.forward )  # PyTorch models
    else:
        signature = inspect.signature(__lowerCamelCase.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def A__ ( d, parent_key = "", delimiter = "." ):
    """Flatten a nested mapping into a single level, joining keys with *delimiter*.

    Example: ``{"a": {"b": 1}}`` -> ``{"a.b": 1}``. Empty sub-dicts are kept as
    leaf values.
    """
    from collections.abc import MutableMapping

    def _flatten_dict(d, parent_key="", delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v, MutableMapping ):
                # Recurse into non-empty sub-mappings.
                yield from _flatten_dict(v, key, delimiter=delimiter )
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter ) )
@contextmanager
def A__ ( working_dir, use_temp_dir = False ):
    """Yield *working_dir*, or a fresh temporary directory when *use_temp_dir*.

    The temporary directory (and its contents) is deleted when the context
    exits.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def A__ ( array, axes=None ):
    """Framework-agnostic ``transpose`` dispatching on the tensor's framework.

    Supports NumPy arrays and PyTorch / TensorFlow / JAX tensors; *axes*
    follows the NumPy convention (None reverses the axes).

    Raises ValueError for unsupported types.
    """
    if is_numpy_array(array ):
        return np.transpose(array, axes=axes )
    elif is_torch_tensor(array ):
        # torch has no `axes=None` form: `.T` reverses, `.permute` reorders.
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array, perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array, axes=axes )
    else:
        raise ValueError(F'''Type not supported for transpose: {type(array )}.''' )
def A__ ( array, newshape ):
    """Framework-agnostic ``reshape`` dispatching on the tensor's framework.

    Supports NumPy arrays and PyTorch / TensorFlow / JAX tensors; *newshape*
    is a sequence of dimension sizes (NumPy convention).

    Raises ValueError for unsupported types.
    """
    if is_numpy_array(array ):
        return np.reshape(array, newshape )
    elif is_torch_tensor(array ):
        # torch expects the target shape unpacked as positional ints.
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array, newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array, newshape )
    else:
        raise ValueError(F'''Type not supported for reshape: {type(array )}.''' )
def A__ ( array, axis=None ):
    """Framework-agnostic ``squeeze`` dispatching on the tensor's framework.

    Removes all length-1 axes when *axis* is None, otherwise only the given
    axis.

    Raises ValueError for unsupported types.
    """
    if is_numpy_array(array ):
        return np.squeeze(array, axis=axis )
    elif is_torch_tensor(array ):
        # torch names the keyword `dim` and rejects `dim=None`.
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array, axis=axis )
    else:
        raise ValueError(F'''Type not supported for squeeze: {type(array )}.''' )
def A__ ( array, axis ):
    """Framework-agnostic ``expand_dims`` dispatching on the tensor's framework.

    Inserts a new length-1 axis at position *axis*.

    Raises ValueError for unsupported types.
    """
    if is_numpy_array(array ):
        return np.expand_dims(array, axis )
    elif is_torch_tensor(array ):
        # torch's equivalent is `unsqueeze` with keyword `dim`.
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array, axis=axis )
    else:
        raise ValueError(F'''Type not supported for expand_dims: {type(array )}.''' )
def A__ ( array ):
    """Return the total number of elements of a tensor/array, framework-agnostically.

    Raises ValueError for unsupported types.
    """
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        # Fixed copy-pasted message: this function computes tensor size, not expand_dims.
        raise ValueError(F'''Type not supported for tensor_size: {type(array )}.''' )
def A__ ( auto_map, repo_id ):
    """Prefix every ``auto_map`` entry with *repo_id* (``"repo--module.Class"``).

    Entries that are None or already contain ``--`` are left untouched;
    list/tuple values are rewritten element-wise into a list. The mapping is
    updated in place and also returned.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list) ):
            auto_map[key] = [F'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'''{repo_id}--{value}'''
    return auto_map
def A__ ( __lowerCamelCase ):
    """Infer which framework ('tf', 'pt' or 'flax') a model class belongs to.

    Walks the class MRO and classifies by the module each base class lives in
    (or by the well-known PreTrainedModel base-class names).

    Raises TypeError when no base class comes from a known framework.
    """
    for base_class in inspect.getmro(__lowerCamelCase ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    # Only fail after the whole MRO has been inspected (mixins may precede the
    # framework base class).
    raise TypeError(F'''Could not infer framework from class {__lowerCamelCase}.''' )
| 299
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
    """Fixture builder for the Flax RegNet tests: holds the hyper-parameters of a
    tiny RegNet and produces (config, pixel_values) pairs plus per-head checks."""
    # NOTE(review): the __init__ signature declares every parameter as `_A` and
    # each assignment below rebinds the same local name instead of setting the
    # attribute read later (parent/batch_size/...) — verify against the reference
    # FlaxRegNetModelTester before relying on any attribute.
    def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = image_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = embeddings_size
        SCREAMING_SNAKE_CASE_ = hidden_sizes
        SCREAMING_SNAKE_CASE_ = depths
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = len(_A )
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Build a random pixel batch together with a matching config.
        SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ = self.get_config()
        return config, pixel_values
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # RegNetConfig mirroring this tester's hyper-parameters.
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def _UpperCamelCase ( self , _A , _A ) -> int:
        # Base model: expect a (b, c, h/32, w/32) feature map.
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _UpperCamelCase ( self , _A , _A ) -> Any:
        # Classification head: expect (batch_size, num_labels) logits.
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCamelCase ( self ) -> Any:
        # Repackage (config, pixel_values) as the kwargs dict the common tests use.
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-mixin test suite for the Flax RegNet models (a pure conv net, so
    the inputs-embeds / embedding-resize tests are skipped)."""
    UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    # NOTE(review): the three identically named flags below shadow one another —
    # verify which of is_encoder_decoder / test_head_masking / has_attentions
    # each was meant to be.
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    def _UpperCamelCase ( self ) -> None:
        # Standard fixtures: model tester + config tester (RegNet has no text modality).
        SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Run the full battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _UpperCamelCase ( self ) -> str:
        # Placeholder for the common-properties hook referenced above.
        return
    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )
    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def _UpperCamelCase ( self ) -> int:
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def _UpperCamelCase ( self ) -> Dict:
        pass
    def _UpperCamelCase ( self ) -> List[Any]:
        # Every model's call signature must start with `pixel_values`.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )
    def _UpperCamelCase ( self ) -> Any:
        # `output_hidden_states` must yield one hidden state per stage (+ embeddings).
        def check_hidden_states_output(_A , _A , _A ):
            SCREAMING_SNAKE_CASE_ = model_class(_A )
            SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
            SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
            self.assertEqual(len(_A ) , expected_num_stages + 1 )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE_ = True
            check_hidden_states_output(_A , _A , _A )
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # JIT-compiled and eager execution must produce identically shaped outputs.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
                SCREAMING_SNAKE_CASE_ = model_class(_A )
                @jax.jit
                def model_jitted(_A , **_A ):
                    return model(pixel_values=_A , **_A )
                with self.subTest('''JIT Enabled''' ):
                    SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
    """Load the standard COCO sample image used by the slow integration test."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow end-to-end test: run the pretrained facebook/regnet-y-040 checkpoint
    on the COCO sample image and compare the first logits to reference values."""
    @cached_property
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Image processor is only constructible when vision deps are installed.
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
    @slow
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        SCREAMING_SNAKE_CASE_ = self.default_image_processor
        SCREAMING_SNAKE_CASE_ = prepare_img()
        SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = model(**_A )
        # verify the logits
        SCREAMING_SNAKE_CASE_ = (1, 1000)
        self.assertEqual(outputs.logits.shape , _A )
        # Reference slice taken from the PyTorch implementation's output.
        SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCamelCase__ :
    """Fixture builder for the TF Funnel tests: holds the tiny-model
    hyper-parameters and provides one create_and_check_* helper per head."""
    # NOTE(review): the __init__ signature declares every parameter as `_A` and
    # each assignment below rebinds the same local instead of setting the
    # attribute (parent/batch_size/...) read by the methods further down —
    # verify against the reference TFFunnelModelTester.
    def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=[1, 1, 2] , _A=1 , _A=32 , _A=4 , _A=8 , _A=37 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=0.0 , _A=512 , _A=3 , _A=0.02 , _A=3 , _A=4 , _A=None , _A=False , ) -> Dict:
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = batch_size
        SCREAMING_SNAKE_CASE_ = seq_length
        SCREAMING_SNAKE_CASE_ = is_training
        SCREAMING_SNAKE_CASE_ = use_input_mask
        SCREAMING_SNAKE_CASE_ = use_token_type_ids
        SCREAMING_SNAKE_CASE_ = use_labels
        SCREAMING_SNAKE_CASE_ = vocab_size
        SCREAMING_SNAKE_CASE_ = block_sizes
        SCREAMING_SNAKE_CASE_ = num_decoder_layers
        SCREAMING_SNAKE_CASE_ = d_model
        SCREAMING_SNAKE_CASE_ = n_head
        SCREAMING_SNAKE_CASE_ = d_head
        SCREAMING_SNAKE_CASE_ = d_inner
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = hidden_dropout
        SCREAMING_SNAKE_CASE_ = attention_dropout
        SCREAMING_SNAKE_CASE_ = activation_dropout
        SCREAMING_SNAKE_CASE_ = max_position_embeddings
        SCREAMING_SNAKE_CASE_ = type_vocab_size
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = num_labels
        SCREAMING_SNAKE_CASE_ = num_choices
        SCREAMING_SNAKE_CASE_ = scope
        SCREAMING_SNAKE_CASE_ = initializer_std
        # Used in the tests to check the size of the first attention layer
        SCREAMING_SNAKE_CASE_ = n_head
        # Used in the tests to check the size of the first hidden state
        SCREAMING_SNAKE_CASE_ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        SCREAMING_SNAKE_CASE_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            SCREAMING_SNAKE_CASE_ = self.num_hidden_layers + 2
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Build random ids/masks/labels plus a matching FunnelConfig.
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> Dict:
        # Base TFFunnelModel: dict, list and tensor call styles must all work,
        # and disabling each config flag must keep the output shape intact.
        SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE_ = model(_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = TFFunnelModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> List[Any]:
        # TFFunnelBaseModel pools down to 2 (or 3) positions depending on config.
        SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE_ = model(_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = TFFunnelBaseModel(config=_A )
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> Optional[Any]:
        # Pretraining head: per-token binary logits of shape (batch, seq_len).
        SCREAMING_SNAKE_CASE_ = TFFunnelForPreTraining(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> Union[str, Any]:
        # MLM head: vocabulary-sized logits per token.
        SCREAMING_SNAKE_CASE_ = TFFunnelForMaskedLM(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> Dict:
        # Sequence-classification head: one logit vector per example.
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = TFFunnelForSequenceClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> Dict:
        # Multiple-choice head: inputs are tiled to (batch, num_choices, seq_len).
        SCREAMING_SNAKE_CASE_ = self.num_choices
        SCREAMING_SNAKE_CASE_ = TFFunnelForMultipleChoice(config=_A )
        SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE_ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> List[str]:
        # Token-classification head: per-token label logits.
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = TFFunnelForTokenClassification(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A , ) -> int:
        # QA head: separate start/end logits, each (batch, seq_len).
        SCREAMING_SNAKE_CASE_ = TFFunnelForQuestionAnswering(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Repackage the fixture tuple as the kwargs dict used by the common tests.
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) , (
                SCREAMING_SNAKE_CASE_
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common/pipeline test suite for the full (decoder-bearing) TF Funnel models."""
    UpperCAmelCase_ =(
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase_ =(
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the two identically named flags below shadow one another —
    # verify which of test_head_masking / test_onnx each was meant to be.
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    def _UpperCamelCase ( self ) -> Tuple:
        # Standard fixtures shared by every test below.
        SCREAMING_SNAKE_CASE_ = TFFunnelModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A )
    def _UpperCamelCase ( self ) -> List[str]:
        self.config_tester.run_common_tests()
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )
    def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_A )
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_A )
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_A )
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_A )
@require_tf
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common test suite for the base (encoder-only) TF Funnel models, driven by
    a tester constructed with ``base=True``."""
    UpperCAmelCase_ =(
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    # NOTE(review): the two identically named flags below shadow one another —
    # verify which of test_head_masking / test_onnx each was meant to be.
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # base=... selects the encoder-only layer-count bookkeeping in the tester.
        SCREAMING_SNAKE_CASE_ = TFFunnelModelTester(self , base=_A )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A )
    def _UpperCamelCase ( self ) -> List[Any]:
        self.config_tester.run_common_tests()
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*_A )
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_A )
    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_A )
| 299
|
from __future__ import annotations

from fractions import Fraction
from math import gcd, isqrt, sqrt
def A__ ( __lowerCamelCase ):
    """Return True when *__lowerCamelCase* is a perfect square.

    Uses ``math.isqrt`` so the test stays exact for large integers, where the
    float-based ``number ** 0.5`` can be off by one.
    """
    root = isqrt(__lowerCamelCase )
    return __lowerCamelCase == root * root
def A__ ( x_num, x_den, y_num, y_den, z_num, z_den ):
    """Return x + y + z as a reduced (numerator, denominator) pair, where the
    three addends are given as numerator/denominator integer pairs."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    # Reduce to lowest terms.
    hcf = gcd(top, bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def A__ ( order = 35 ):
    """Project Euler Problem 180: find all rational triples (x, y, z) with
    denominators up to *order* that satisfy f_n(x, y, z) = 0 for some
    n in {-2, -1, 1, 2}, sum each distinct x + y + z exactly, and return
    numerator + denominator of that total.
    """

    def _is_sq(number ):
        # Exact perfect-square test (isqrt avoids float rounding).
        root = isqrt(number )
        return number == root * root

    def _add_three(x_num, x_den, y_num, y_den, z_num, z_den ):
        # Reduced numerator/denominator of x + y + z.
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top, bottom )
        return top // hcf, bottom // hcf

    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1, order + 1 ):
        for x_den in range(x_num + 1, order + 1 ):
            for y_num in range(1, order + 1 ):
                for y_den in range(y_num + 1, order + 1 ):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=2: z**2 = x**2 + y**2 (only valid when both sides are perfect squares)
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = isqrt(z_num )
                        z_den = isqrt(z_den )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-2: 1/z**2 = 1/x**2 + 1/y**2 (again requires perfect squares)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = isqrt(z_num )
                        z_den = isqrt(z_den )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
    # Sum every distinct reduced fraction exactly.
    for num, den in unique_s:
        total += Fraction(num, den )
    return total.denominator + total.numerator
| 299
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase__ ( unittest.TestCase ):
    """Test suite for Wav2Vec2ProcessorWithLM: save/load round-trips,
    feature extraction and tokenization delegation, (batch) beam-search
    decoding with/without multiprocessing pools, decoder parameter
    overrides, hub downloads, and word-offset outputs.

    NOTE(review): identifiers were machine-mangled (every local is
    ``SCREAMING_SNAKE_CASE_``, every parameter ``_A``), so many bodies
    reference names the mangling destroyed; the comments below document
    the evident intent — confirm against the un-mangled original.
    """
    # setUp: write a tiny CTC vocab + feature-extractor config to a temp
    # dir; the beam-search decoder itself is fetched from the hub.
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A ) ) ) )
        SCREAMING_SNAKE_CASE_ = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        SCREAMING_SNAKE_CASE_ = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , _A )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )
        with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )
        # load decoder from hub
        SCREAMING_SNAKE_CASE_ = '''hf-internal-testing/ngram-beam-search-decoder'''
    # Factory: tokenizer built from the temp-dir vocab (extra kwargs merged
    # over the special-token map written in setUp).
    def _UpperCamelCase ( self , **_A ) -> int:
        SCREAMING_SNAKE_CASE_ = self.add_kwargs_tokens_map.copy()
        kwargs.update(_A )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
    # Factory: feature extractor from the temp-dir config.
    def _UpperCamelCase ( self , **_A ) -> int:
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
    # Factory: pyctcdecode beam-search decoder from the hub repo above.
    def _UpperCamelCase ( self , **_A ) -> Any:
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
    # tearDown: drop the temp dir created in setUp.
    def _UpperCamelCase ( self ) -> Optional[Any]:
        shutil.rmtree(self.tmpdirname )
    # save_pretrained / from_pretrained round-trip preserves all three
    # sub-components (tokenizer, feature extractor, decoder).
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , _A )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , _A )
    # from_pretrained forwards LM hyper-parameter overrides to the decoder.
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )
    # Constructing the processor with a tokenizer whose vocab exceeds the
    # decoder alphabet must raise.
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(_A , '''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
    # processor(audio) delegates to the feature extractor (same tensors).
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = floats_list((3, 1000) )
        SCREAMING_SNAKE_CASE_ = feature_extractor(_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = processor(_A , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # processor(text=...) delegates to the tokenizer (same encodings).
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = '''This is a test string'''
        SCREAMING_SNAKE_CASE_ = processor(text=_A )
        SCREAMING_SNAKE_CASE_ = tokenizer(_A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    # Deterministic dummy logits; seed fixes the random draw.
    def _UpperCamelCase ( self , _A=(2, 10, 16) , _A=77 ) -> Optional[Any]:
        np.random.seed(_A )
        return np.random.rand(*_A )
    # processor.decode matches a direct decoder.decode_beams call
    # (text, logit score, LM score).
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        SCREAMING_SNAKE_CASE_ = processor.decode(_A )
        SCREAMING_SNAKE_CASE_ = decoder.decode_beams(_A )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    # batch_decode matches decode_beams_batch for pool=None / fork / spawn.
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def _UpperCamelCase ( self , _A ) -> Dict:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            SCREAMING_SNAKE_CASE_ = processor.batch_decode(_A )
        else:
            with get_context(_A ).Pool() as pool:
                SCREAMING_SNAKE_CASE_ = processor.batch_decode(_A , _A )
        SCREAMING_SNAKE_CASE_ = list(_A )
        with get_context('''fork''' ).Pool() as p:
            SCREAMING_SNAKE_CASE_ = decoder.decode_beams_batch(_A , _A )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(_A , decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
        self.assertListEqual(_A , decoded_processor.logit_score )
        self.assertListEqual(_A , decoded_processor.lm_score )
    # Beam-search knobs (beam_width, beam_prune_logp, token_min_logp) are
    # forwarded to the decoder; scores match within tolerance.
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()
        SCREAMING_SNAKE_CASE_ = 15
        SCREAMING_SNAKE_CASE_ = -20.0
        SCREAMING_SNAKE_CASE_ = -4.0
        SCREAMING_SNAKE_CASE_ = processor.batch_decode(
            _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
        SCREAMING_SNAKE_CASE_ = decoded_processor_out.text
        SCREAMING_SNAKE_CASE_ = list(_A )
        with get_context('''fork''' ).Pool() as pool:
            SCREAMING_SNAKE_CASE_ = decoder.decode_beams_batch(
                _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
        SCREAMING_SNAKE_CASE_ = [d[0][0] for d in decoded_decoder_out]
        SCREAMING_SNAKE_CASE_ = [d[0][2] for d in decoded_decoder_out]
        SCREAMING_SNAKE_CASE_ = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(_A , _A )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
        self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1E-3 ) )
        self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1E-3 ) )
    # LM knobs (alpha, beta, unk_score_offset, lm_score_boundary) are
    # applied to the decoder's language model and survive reset_params.
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()
        SCREAMING_SNAKE_CASE_ = 2.0
        SCREAMING_SNAKE_CASE_ = 5.0
        SCREAMING_SNAKE_CASE_ = -20.0
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = processor.batch_decode(
            _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
        SCREAMING_SNAKE_CASE_ = decoded_processor_out.text
        SCREAMING_SNAKE_CASE_ = list(_A )
        decoder.reset_params(
            alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
        with get_context('''fork''' ).Pool() as pool:
            SCREAMING_SNAKE_CASE_ = decoder.decode_beams_batch(
                _A , _A , )
        SCREAMING_SNAKE_CASE_ = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(_A , _A )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
        SCREAMING_SNAKE_CASE_ = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , _A )
    # from_pretrained downloads only the decoder-relevant repo files.
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = processor.decoder.model_container[processor.decoder._model_key]
        SCREAMING_SNAKE_CASE_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        SCREAMING_SNAKE_CASE_ = os.listdir(_A )
        SCREAMING_SNAKE_CASE_ = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_A , _A )
    # Loading from a local snapshot yields the same decoder files as the hub.
    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained(_A )
        SCREAMING_SNAKE_CASE_ = processor.decoder.model_container[processor.decoder._model_key]
        SCREAMING_SNAKE_CASE_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
        SCREAMING_SNAKE_CASE_ = os.listdir(_A )
        SCREAMING_SNAKE_CASE_ = os.listdir(_A )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_A , _A )
    # AutoProcessor resolves to the same processor (inputs and decodes match).
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = floats_list((3, 1000) )
        SCREAMING_SNAKE_CASE_ = processor_wavaveca(_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = processor_auto(_A , return_tensors='''np''' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()
        SCREAMING_SNAKE_CASE_ = processor_wavaveca.batch_decode(_A )
        SCREAMING_SNAKE_CASE_ = processor_auto.batch_decode(_A )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    # model_input_names mirrors the feature extractor's.
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.get_feature_extractor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_decoder()
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
    # Helper: project a list of offset dicts onto one key.
    @staticmethod
    def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = [d[key] for d in offsets]
        return retrieved_list
    # decode(output_word_offsets=True) returns per-word start/end offsets.
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()[0]
        SCREAMING_SNAKE_CASE_ = processor.decode(_A , output_word_offsets=_A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_A , _A ) )
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
    # batch_decode(output_word_offsets=True): same, per batch element.
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        SCREAMING_SNAKE_CASE_ = self._get_dummy_logits()
        SCREAMING_SNAKE_CASE_ = processor.batch_decode(_A , output_word_offsets=_A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_A , _A ) )
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
    # Integration: real model + real audio; word offsets converted to times
    # via the model's input-to-logits ratio and compared to references.
    @slow
    @require_torch
    @require_torchaudio
    def _UpperCamelCase ( self ) -> int:
        import torch
        SCREAMING_SNAKE_CASE_ = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
        SCREAMING_SNAKE_CASE_ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
        SCREAMING_SNAKE_CASE_ = iter(_A )
        SCREAMING_SNAKE_CASE_ = next(_A )
        SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        SCREAMING_SNAKE_CASE_ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        SCREAMING_SNAKE_CASE_ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ = model(_A ).logits.cpu().numpy()
        SCREAMING_SNAKE_CASE_ = processor.decode(logits[0] , output_word_offsets=_A )
        SCREAMING_SNAKE_CASE_ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        SCREAMING_SNAKE_CASE_ = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        SCREAMING_SNAKE_CASE_ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
        self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
        # output times
        SCREAMING_SNAKE_CASE_ = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
        SCREAMING_SNAKE_CASE_ = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        SCREAMING_SNAKE_CASE_ = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
        self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
| 299
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Container for the classifier-free-guidance "negative" embeddings used
    by the VQ-Diffusion pipeline: either a learnable zero-initialized tensor
    of shape (length, hidden_size) or None (plain empty-prompt guidance).

    NOTE(review): identifier mangling broke this body — the parameters are
    all named ``_A`` yet the code reads ``learnable``/``hidden_size``/
    ``length``; confirm against the un-mangled original.
    """
    @register_to_config
    def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
        super().__init__()
        # Remember whether the embeddings are trainable.
        SCREAMING_SNAKE_CASE_ = learnable
        if self.learnable:
            # Both dimensions are required to allocate the embedding tensor.
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            # Zero-init; trained as part of the model.
            SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
        else:
            SCREAMING_SNAKE_CASE_ = None
        # Wrap (possibly None) embeddings as an nn.Parameter so they are
        # registered with the module.
        SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """VQ-Diffusion text-to-image pipeline: CLIP-encode the prompt,
    iteratively denoise discrete VQ latent indices with a transformer under
    a VQDiffusionScheduler, then decode the indices to images via a VQ-VAE.

    NOTE(review): identifier mangling replaced attribute annotations with 42
    and parameter names with ``_A``; comments below describe evident intent.
    """
    # Expected sub-modules: vqvae, text_encoder, tokenizer, transformer,
    # learned_classifier_free_sampling_embeddings, scheduler.
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    UpperCAmelCase_ =42
    def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
        super().__init__()
        # Register sub-modules so save/load and device placement work.
        self.register_modules(
            vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
        # _encode_prompt: tokenize + CLIP-encode the prompt(s); when doing
        # classifier-free guidance, prepend unconditional embeddings
        # (learned ones if available, otherwise the empty prompt).
        SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
        # get prompt text embeddings
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about and drop tokens beyond CLIP's context window.
            SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
        SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
        # duplicate text embeddings for each generation per prompt
        SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings, tiled per batch.
                SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
            else:
                # Fall back to encoding the empty prompt.
                SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
                SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
                SCREAMING_SNAKE_CASE_ = self.tokenizer(
                    _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
                SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
                SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        # Main generation loop. Validates inputs, initializes fully-masked
        # latents, then for each timestep predicts log p(x_0), applies
        # guidance + truncation, and steps the scheduler.
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = 1
        elif isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = len(_A )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
        SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
        # Guidance is active only when the scale exceeds 1.
        SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
        SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_A )}.''' )
        # get the initial completely masked latents unless the user supplied it
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The last embedding index is the "masked" token.
            SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
            SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            SCREAMING_SNAKE_CASE_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_A , device=self.device )
        SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
        SCREAMING_SNAKE_CASE_ = latents
        for i, t in enumerate(self.progress_bar(_A ) ):
            # expand the sample if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
            if do_classifier_free_guidance:
                # Combine unconditional/conditional halves, then renormalize
                # the log-probabilities.
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
                SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
            SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
            # remove `log(0)`'s (`-inf`s)
            SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_A , _A , _A )
        # Decode final latent indices to pixel space via the VQ-VAE.
        SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
        SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
        SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
        SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A )
    def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
        # truncate: zero out (set to log(0)) every class whose cumulative
        # probability mass falls beyond `truncation_rate`, keeping at least
        # the single most likely class per position.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
        SCREAMING_SNAKE_CASE_ = torch.exp(_A )
        SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
        SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
        SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class order.
        SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
        SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
        SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
        return rv
| 299
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Composite configuration for a vision encoder-decoder model, holding
    an ``encoder`` and a ``decoder`` sub-configuration re-built through
    AutoConfig.

    NOTE(review): identifier mangling broke the locals in these bodies
    (``kwargs``, ``encoder_config`` etc. are never bound); comments record
    evident intent only.
    """
    UpperCAmelCase_ ="vision-encoder-decoder"
    UpperCAmelCase_ =True
    def __init__( self , **_A ) -> Optional[Any]:
        super().__init__()
        # Both sub-configs are mandatory.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuraton of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        # Pop each sub-config dict, extract its model_type, and rebuild the
        # concrete config class via AutoConfig.for_model.
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''encoder''' )
        SCREAMING_SNAKE_CASE_ = encoder_config.pop('''model_type''' )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''decoder''' )
        SCREAMING_SNAKE_CASE_ = decoder_config.pop('''model_type''' )
        SCREAMING_SNAKE_CASE_ = AutoConfig.for_model(_A , **_A )
        SCREAMING_SNAKE_CASE_ = AutoConfig.for_model(_A , **_A )
        SCREAMING_SNAKE_CASE_ = True
    @classmethod
    def _UpperCamelCase ( cls , _A , _A , **_A ) -> PretrainedConfig:
        # from_encoder_decoder_configs: mark the decoder config as a decoder
        # with cross-attention before composing.
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_A )
    def _UpperCamelCase ( self ) -> Optional[int]:
        # to_dict: deep-copy own attributes and serialize nested sub-configs.
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.encoder.to_dict()
        SCREAMING_SNAKE_CASE_ = self.decoder.to_dict()
        SCREAMING_SNAKE_CASE_ = self.__class__.model_type
        return output
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for the vision-encoder part: one
    ``pixel_values`` input with dynamic batch/channel/spatial axes and a
    ``last_hidden_state`` output."""
    UpperCAmelCase_ =version.parse("1.11" )
    @property
    def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single image input.
        dynamic_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        return OrderedDict([('''pixel_values''', dynamic_axes)] )
    @property
    def _UpperCamelCase ( self ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-4
    @property
    def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Encoder output: batch and sequence dimensions are dynamic.
        output_axes = {0: '''batch''', 1: '''encoder_sequence'''}
        return OrderedDict({'''last_hidden_state''': output_axes} )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for the text-decoder part: declares the
    decoder inputs (ids, attention mask, encoder hidden states) and adapts
    the parent's dummy-input generator by swapping token inputs for
    zero-filled encoder hidden states.

    NOTE(review): identifier mangling left several locals (``common_inputs``,
    ``dummy_input`` …) unbound; comments record evident intent only.
    """
    @property
    def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Decoder input axes; sequence dims grow with cached past states.
        SCREAMING_SNAKE_CASE_ = OrderedDict()
        SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs
    def _UpperCamelCase ( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ) -> Mapping[str, Any]:
        # generate_dummy_inputs: start from the parent's token-level dummies,
        # then replace input_ids/attention_mask with zero-valued encoder
        # hidden states of shape (batch, encoder_sequence, hidden_size).
        import torch
        SCREAMING_SNAKE_CASE_ = OrderedDict()
        SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
            _A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dummy_input['''input_ids'''].shape
        SCREAMING_SNAKE_CASE_ = (batch, encoder_sequence, self._config.encoder_hidden_size)
        SCREAMING_SNAKE_CASE_ = dummy_input.pop('''input_ids''' )
        SCREAMING_SNAKE_CASE_ = dummy_input.pop('''attention_mask''' )
        SCREAMING_SNAKE_CASE_ = torch.zeros(_A )
        return common_inputs
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Top-level ONNX config for the vision encoder-decoder: dispatches to a
    dedicated encoder config or decoder config per sub-model.

    NOTE(review): the referenced ``VisionEncoderDecoderEncoderOnnxConfig`` /
    ``VisionEncoderDecoderDecoderOnnxConfig`` names were destroyed by the
    class-name mangling above (both classes are now ``UpperCamelCase__``),
    so these constructors resolve to nothing in this file — confirm against
    the un-mangled original.
    """
    @property
    def _UpperCamelCase ( self ) -> None:
        # No direct inputs; the per-part configs below define them.
        pass
    def _UpperCamelCase ( self , _A ) -> OnnxConfig:
        # Build the encoder-side ONNX config.
        return VisionEncoderDecoderEncoderOnnxConfig(_A )
    def _UpperCamelCase ( self , _A , _A , _A = "default" ) -> OnnxConfig:
        # Build the decoder-side ONNX config; the decoder needs to know the
        # encoder's hidden size for its dummy encoder states.
        SCREAMING_SNAKE_CASE_ = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(_A , _A )
| 299
|
def A__ ( __lowerCamelCase ):
    """Return the sum of the proper divisors of ``__lowerCamelCase``.

    A proper divisor is any positive divisor strictly less than the number
    itself, e.g. for 28 the result is 1 + 2 + 4 + 7 + 14 = 28.

    Raises:
        ValueError: if the input is not an ``int`` or is not positive.

    Fixes over the previous version: ``isinstance(x, x)`` raised TypeError on
    every call and the body referenced an undefined name ``input_num``; the
    divisor scan is also reduced from O(n) to O(sqrt(n)).
    """
    import math

    if not isinstance(__lowerCamelCase, int ):
        raise ValueError('''Input must be an integer''' )
    if __lowerCamelCase <= 0:
        raise ValueError('''Input must be positive''' )
    # 1 has no proper divisors.
    if __lowerCamelCase == 1:
        return 0
    # Count divisor pairs (d, n // d) up to isqrt(n); 1 always divides n.
    total = 1
    for d in range(2, math.isqrt(__lowerCamelCase ) + 1 ):
        if __lowerCamelCase % d == 0:
            total += d
            partner = __lowerCamelCase // d
            # Perfect squares contribute their root only once.
            if partner != d:
                total += partner
    return total
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
| 1
|
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
__UpperCAmelCase = input("Enter image url: ").strip()
print(F"""Downloading image from {url} ...""")
__UpperCAmelCase = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
__UpperCAmelCase = soup.find("meta", {"property": "og:image"})["content"]
__UpperCAmelCase = requests.get(image_url).content
__UpperCAmelCase = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 299
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test-suite for the byte-level ByT5 tokenizer.

    NOTE(review): in this machine-rewritten copy every test method shares the
    name ``_UpperCamelCase``, so each ``def`` shadows the previous one and
    only the last method survives on the class.  Many locals were also
    collapsed into the single name ``SCREAMING_SNAKE_CASE_`` while later
    lines still reference the original identifiers (e.g. ``tokenizer``,
    ``toks``, ``batch_with_eos_added``, ``encoded``), and one helper below
    declares several parameters that all share the name ``_A`` (a
    SyntaxError) — confirm the intended names before running these tests.
    """
    # Tokenizer class under test; ByT5 has no fast (Rust) implementation.
    UpperCAmelCase_ =ByTaTokenizer
    UpperCAmelCase_ =False
    def _UpperCamelCase ( self ) -> Tuple:
        # Persist a default tokenizer for the common-test machinery.
        super().setUp()
        SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _UpperCamelCase ( self ) -> List[str]:
        # Reference tokenizer loaded from the hub checkpoint.
        return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
    def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        SCREAMING_SNAKE_CASE_ = []
        for i in range(len(_A ) ):
            try:
                SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only tokens that are plain ASCII letters/spaces and that
        # round-trip through encode().
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
        SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
        if max_length is not None and len(_A ) > max_length:
            SCREAMING_SNAKE_CASE_ = toks[:max_length]
        if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
            while len(_A ) < min_length:
                SCREAMING_SNAKE_CASE_ = toks + toks
        # toks_str = [t[1] for t in toks]
        SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
        # Ensure consistency
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
        if " " not in output_txt and len(_A ) > 1:
            SCREAMING_SNAKE_CASE_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
            )
        if with_prefix_space:
            SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
        SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
        return output_txt, output_ids
    def _UpperCamelCase ( self ) -> str:
        # An explicit trailing </s> must encode identically to the
        # automatically appended EOS.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
        SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
    def _UpperCamelCase ( self ) -> Any:
        # Multibyte (non-ASCII) characters must round-trip through
        # encode/decode at the byte level.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
        SCREAMING_SNAKE_CASE_ = tokenizer(_A )
        SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''Unicode €.</s>''' )
        SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
        SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , _A )
        # decoding
        SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
        self.assertEqual(_A , '''e è é ê ë</s>''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
    def _UpperCamelCase ( self ) -> List[str]:
        # Padded batch encoding must produce the expected ids and shapes.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        self.assertIsInstance(_A , _A )
        if FRAMEWORK != "jax":
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
        else:
            SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_A , _A )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    def _UpperCamelCase ( self ) -> str:
        # Encoder-only call must not emit decoder inputs.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , _A )
        self.assertIn('''attention_mask''' , _A )
        self.assertNotIn('''decoder_input_ids''' , _A )
        self.assertNotIn('''decoder_attention_mask''' , _A )
    def _UpperCamelCase ( self ) -> Tuple:
        # Target texts padded/truncated to max_length=32.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer(
            text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    def _UpperCamelCase ( self ) -> List[Any]:
        # Explicit </s> in source and target texts is preserved verbatim.
        SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
        SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
        SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
        # fmt: off
        SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
        self.assertEqual(_A , batch['''input_ids'''][0] )
        self.assertEqual(_A , batch['''labels'''][0] )
    def _UpperCamelCase ( self ) -> Dict:
        # safety check on max_len default value so we are sure the test works
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                shutil.rmtree(_A )
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
                SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
                tokenizer.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
                self.assertListEqual(_A , _A )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_A )
    def _UpperCamelCase ( self ) -> int:
        # additional_special_tokens must survive a save/re-load round-trip
        # through the serialized special_tokens_map / tokenizer_config files.
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    SCREAMING_SNAKE_CASE_ = json.load(_A )
                SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(_A , _A )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
                    _A , additional_special_tokens=_A , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def _UpperCamelCase ( self ) -> str:
        # Decoding byte 255 (not valid utf-8 on its own) must yield "".
        SCREAMING_SNAKE_CASE_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_A )
                SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
                self.assertTrue(tokenizer.decode([255] ) == '''''' )
    # The following common tests do not apply to a vocabulary-free
    # byte-level tokenizer and are intentionally disabled.
    def _UpperCamelCase ( self ) -> int:
        pass
    def _UpperCamelCase ( self ) -> Any:
        pass
    def _UpperCamelCase ( self ) -> Any:
        pass
    def _UpperCamelCase ( self ) -> Optional[int]:
        pass
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
                self.assertIsInstance(_A , _A )
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Special-token attribute setters and their *_id counterparts must
        # stay mutually consistent.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                SCREAMING_SNAKE_CASE_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                SCREAMING_SNAKE_CASE_ = 0
                SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
                    _A , skip_special_tokens=_A )
                for attr in attributes_list:
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                    setattr(_A , attr + '''_id''' , _A )
                    self.assertEqual(getattr(_A , _A ) , _A )
                    self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
                setattr(_A , '''additional_special_tokens_ids''' , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
                setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 299
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
    """Empty placeholder test case: exists only to exercise the
    ``require_onnxruntime`` decorator (the case is skipped when the
    onnxruntime package is not installed)."""
    pass
| 299
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A__ ( __lowerCamelCase ):
    """Invert (negate) every pixel of a 3-channel image in place.

    Args:
        __lowerCamelCase: image as a numpy array of shape (height, width, 3).

    Returns:
        The same array, with every channel value v replaced by 255 - v.
    """
    img = __lowerCamelCase
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            # Bug fix: the original assigned the negated pixel to a throwaway
            # local instead of writing it back, leaving the image unchanged.
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
    # NOTE(review): ``convert_to_negative`` and ``img`` are not defined in
    # this file (the function above is named ``A__`` and the loaded image is
    # bound to ``__UpperCAmelCase``) — confirm the intended names before
    # running this demo.
    # read original image
    __UpperCAmelCase = imread("image_data/lena.jpg", 1)
    # convert to its negative
    __UpperCAmelCase = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 299
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Module-level logger shared by the CLI run command below.
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def A__ ( __lowerCamelCase ):
    """Guess a pipeline data format name from a file path's extension.

    Args:
        __lowerCamelCase: path of the input/output file; an empty value
            means data flows through stdin/stdout.

    Returns:
        One of ``PipelineDataFormat.SUPPORTED_FORMATS``, or ``"pipe"`` when
        no path is given.

    Raises:
        Exception: if the extension matches no supported format.
    """
    # Bug fix: the body referred to the undefined name ``path`` and passed
    # the path itself to ``endswith`` instead of the candidate extension.
    path = __lowerCamelCase
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F'''Unable to determine file format from file extension {path}. '''
        F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def A__ ( __lowerCamelCase ):
    """Factory for the ``transformers-cli run`` command.

    Builds the inference pipeline and the data reader described by the
    parsed CLI arguments and wraps them in a ``RunCommand``.

    Args:
        __lowerCamelCase: parsed ``argparse`` namespace of the ``run``
            subcommand.

    Returns:
        A ready-to-execute ``RunCommand``.
    """
    args = __lowerCamelCase
    # Bug fix: the original clobbered one local with every intermediate
    # result (leaving ``nlp`` undefined at its use below) and then returned
    # ``RunCommand(args, args)``; keep the pipeline and the reader in
    # distinct names and pass them to the command.
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, )
    return RunCommand(nlp, reader )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """``transformers-cli run``: stream every record of a dataset through an
    inference pipeline and persist the predictions with the configured
    ``PipelineDataFormat``.

    NOTE(review): the two methods below share the (rewritten) name
    ``_UpperCamelCase``, so the second shadows the first on the class —
    confirm the intended method names (``register_subcommand`` / ``run``).
    """

    def __init__( self , nlp , reader ) -> Union[str, Any]:
        # Bug fix: the original declared both parameters as ``_A`` (a
        # SyntaxError) and bound them to a throwaway local; store the
        # pipeline and the data reader where ``run`` reads them back.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def _UpperCamelCase ( _A ) -> List[Any]:
        """Register the ``run`` sub-parser and its options on the parser ``_A``."""
        # Bug fix: the sub-parser was bound to a throwaway local while every
        # following line referenced the undefined name ``run_parser``; the
        # argument ``type``s also pointed at the parser object itself.
        run_parser = _A.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''' )
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''' )
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''' )
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''' )
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
        # Entry point: the module-level run-command factory (``A__`` above).
        run_parser.set_defaults(func=A__ )

    def _UpperCamelCase ( self ) -> str:
        """Run the pipeline over every entry of the reader and save outputs."""
        # Bug fix: the original unpacked both values into one throwaway local
        # and then used the undefined names ``nlp``, ``_A`` and ``outputs``.
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
        else:
            self._reader.save(outputs )
| 299
|
import math
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__lowerCamelCase )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
    """Return the smallest partition value k at which the proportion of
    perfect partitions among all integral candidates drops below the given
    threshold.

    Args:
        __lowerCamelCase: maximum allowed proportion perfect/total.

    Returns:
        The first candidate k = (n**2 - 1) / 4 satisfying the proportion
        bound.
    """
    max_proportion = __lowerCamelCase
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        # Bug fix: the original compared the candidate against
        # int(max_proportion) and discarded the integer conversion.
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if _check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    # Bug fix: the original returned int(max_proportion),
                    # which is always 0; the candidate itself is the answer.
                    return partition_candidate
        integer += 1


def _check_partition_perfect(positive_integer ):
    """True when sqrt(4*k + 1) / 2 + 1 / 2 is an exact power of two.

    Private helper: the original body called the undefined name
    ``check_partition_perfect``.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file (the search
    # function above is named ``A__``) — confirm the intended entry point.
    print(F"""{solution() = }""")
| 299
| 1
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# NOTE(review): all six constants below are bound to the same (rewritten)
# name ``__UpperCAmelCase``, so each assignment overwrites the previous one.
# The conversion script below expects them under the names
# TEST_UNET_CONFIG, IMAGENET_64_UNET_CONFIG, LSUN_256_UNET_CONFIG,
# CD_SCHEDULER_CONFIG, CT_IMAGENET_64_SCHEDULER_CONFIG and
# CT_LSUN_256_SCHEDULER_CONFIG — confirm before running.
# U-Net hyper-parameters for the small 32x32 test checkpoint.
__UpperCAmelCase = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 10_00,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# U-Net hyper-parameters for the class-conditional ImageNet-64 checkpoints.
__UpperCAmelCase = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 10_00,
    "block_out_channels": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# U-Net hyper-parameters for the unconditional LSUN-256 checkpoints.
__UpperCAmelCase = {
    "sample_size": 2_56,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}
# Scheduler settings for consistency-distillation (cd) checkpoints.
__UpperCAmelCase = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
# Scheduler settings for consistency-training (ct) ImageNet-64 checkpoints.
__UpperCAmelCase = {
    "num_train_timesteps": 2_01,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
# Scheduler settings for consistency-training (ct) LSUN-256 checkpoints.
__UpperCAmelCase = {
    "num_train_timesteps": 1_51,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def A__ ( __lowerCamelCase ):
    """Parse a command-line boolean flag.

    Accepts real booleans unchanged and the usual yes/no string spellings
    (case-insensitive).

    Returns:
        The parsed boolean.

    Raises:
        argparse.ArgumentTypeError: for any unrecognised value.
    """
    v = __lowerCamelCase
    # Bug fix: the original called isinstance(v, v), which raises TypeError
    # for every non-type input; the intended check is for an actual bool.
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
def A__ ( checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False ):
    """Copy one ResNet block's tensors from an OpenAI consistency-model
    checkpoint into diffusers naming.

    Bug fix: the original declared five parameters that all shared one name
    (a SyntaxError) and wrote every tensor to a throwaway local instead of
    into the destination dict.

    Args:
        checkpoint: source state dict (OpenAI ``in_layers``/``out_layers`` layout).
        new_checkpoint: destination state dict, mutated in place.
        old_prefix: key prefix of the block in ``checkpoint``.
        new_prefix: key prefix of the block in ``new_checkpoint``.
        has_skip: whether the block carries a skip-connection conv.

    Returns:
        ``new_checkpoint`` (also mutated in place).
    """
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def A__ ( checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None ):
    """Copy one attention block's tensors from an OpenAI consistency-model
    checkpoint into diffusers naming, splitting the fused qkv projection.

    Bug fix: the original declared five parameters that all shared one name
    (a SyntaxError) and wrote every tensor to a throwaway local instead of
    into the destination dict.

    Args:
        checkpoint: source state dict with fused ``qkv`` conv weights.
        new_checkpoint: destination state dict, mutated in place.
        old_prefix: key prefix of the block in ``checkpoint``.
        new_prefix: key prefix of the block in ``new_checkpoint``.
        attention_head_dim: unused; kept for call-site compatibility.

    Returns:
        ``new_checkpoint`` (also mutated in place).
    """
    # Split the fused qkv conv into separate q/k/v projections.
    weight_q, weight_k, weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    # Drop the trailing 1x1 conv spatial dims to obtain linear-layer shapes.
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
# NOTE(review): this converter is badly damaged by the machine rewrite —
# the two parameters share one name (a SyntaxError), nearly every
# assignment target was collapsed into ``SCREAMING_SNAKE_CASE_`` (the
# original wrote into ``new_checkpoint[...]`` and locals such as
# ``checkpoint``, ``unet_config``, ``layers_per_block``, ``channels_list``,
# ``current_layer`` and ``prev_channels``), and the f-string prefixes still
# reference those original names.  Restore the intended identifiers before
# running; the structure below is kept byte-identical.
def A__ ( __lowerCamelCase, __lowerCamelCase ):
    # Load the OpenAI-format checkpoint onto CPU and build a fresh diffusers
    # state dict, starting with the time-embedding MLP.
    SCREAMING_SNAKE_CASE_ = torch.load(__lowerCamelCase, map_location='''cpu''' )
    SCREAMING_SNAKE_CASE_ = {}
    SCREAMING_SNAKE_CASE_ = checkpoint['''time_embed.0.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''time_embed.0.bias''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''time_embed.2.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        SCREAMING_SNAKE_CASE_ = checkpoint['''label_emb.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''input_blocks.0.0.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''input_blocks.0.0.bias''']
    SCREAMING_SNAKE_CASE_ = unet_config['''down_block_types''']
    SCREAMING_SNAKE_CASE_ = unet_config['''layers_per_block''']
    SCREAMING_SNAKE_CASE_ = unet_config['''attention_head_dim''']
    SCREAMING_SNAKE_CASE_ = unet_config['''block_out_channels''']
    SCREAMING_SNAKE_CASE_ = 1
    SCREAMING_SNAKE_CASE_ = channels_list[0]
    # Walk the encoder (down) blocks, converting each resnet/attention pair.
    for i, layer_type in enumerate(__lowerCamelCase ):
        SCREAMING_SNAKE_CASE_ = channels_list[i]
        SCREAMING_SNAKE_CASE_ = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(__lowerCamelCase ):
                SCREAMING_SNAKE_CASE_ = F'''down_blocks.{i}.resnets.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''input_blocks.{current_layer}.0'''
                SCREAMING_SNAKE_CASE_ = True if j == 0 and downsample_block_has_skip else False
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, has_skip=__lowerCamelCase )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(__lowerCamelCase ):
                SCREAMING_SNAKE_CASE_ = F'''down_blocks.{i}.resnets.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''input_blocks.{current_layer}.0'''
                SCREAMING_SNAKE_CASE_ = True if j == 0 and downsample_block_has_skip else False
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, has_skip=__lowerCamelCase )
                SCREAMING_SNAKE_CASE_ = F'''down_blocks.{i}.attentions.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''input_blocks.{current_layer}.1'''
                SCREAMING_SNAKE_CASE_ = convert_attention(
                    __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                current_layer += 1
        if i != len(__lowerCamelCase ) - 1:
            SCREAMING_SNAKE_CASE_ = F'''down_blocks.{i}.downsamplers.0'''
            SCREAMING_SNAKE_CASE_ = F'''input_blocks.{current_layer}.0'''
            SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
            current_layer += 1
        SCREAMING_SNAKE_CASE_ = current_channels
    # hardcoded the mid-block for now
    SCREAMING_SNAKE_CASE_ = '''mid_block.resnets.0'''
    SCREAMING_SNAKE_CASE_ = '''middle_block.0'''
    SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = '''mid_block.attentions.0'''
    SCREAMING_SNAKE_CASE_ = '''middle_block.1'''
    SCREAMING_SNAKE_CASE_ = convert_attention(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = '''mid_block.resnets.1'''
    SCREAMING_SNAKE_CASE_ = '''middle_block.2'''
    SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = unet_config['''up_block_types''']
    # Walk the decoder (up) blocks; each has layers_per_block + 1 resnets.
    for i, layer_type in enumerate(__lowerCamelCase ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                SCREAMING_SNAKE_CASE_ = F'''up_blocks.{i}.resnets.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''output_blocks.{current_layer}.0'''
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, has_skip=__lowerCamelCase )
                current_layer += 1
            if i != len(__lowerCamelCase ) - 1:
                SCREAMING_SNAKE_CASE_ = F'''up_blocks.{i}.upsamplers.0'''
                SCREAMING_SNAKE_CASE_ = F'''output_blocks.{current_layer-1}.1'''
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                SCREAMING_SNAKE_CASE_ = F'''up_blocks.{i}.resnets.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''output_blocks.{current_layer}.0'''
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, has_skip=__lowerCamelCase )
                SCREAMING_SNAKE_CASE_ = F'''up_blocks.{i}.attentions.{j}'''
                SCREAMING_SNAKE_CASE_ = F'''output_blocks.{current_layer}.1'''
                SCREAMING_SNAKE_CASE_ = convert_attention(
                    __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
                current_layer += 1
            if i != len(__lowerCamelCase ) - 1:
                SCREAMING_SNAKE_CASE_ = F'''up_blocks.{i}.upsamplers.0'''
                SCREAMING_SNAKE_CASE_ = F'''output_blocks.{current_layer-1}.2'''
                SCREAMING_SNAKE_CASE_ = convert_resnet(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
    # Final norm + output conv.
    SCREAMING_SNAKE_CASE_ = checkpoint['''out.0.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''out.0.bias''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''out.2.weight''']
    SCREAMING_SNAKE_CASE_ = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    # NOTE(review): this driver references names that the rewrite above no
    # longer defines (``strabool``, ``con_pt_to_diffuser``, ``ckpt_name``,
    # ``unet_config``, ``scheduler_config``, the *_CONFIG constants, …) —
    # all of them were collapsed into ``__UpperCAmelCase``/``A__``.  Restore
    # the intended identifiers before running.
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    __UpperCAmelCase = parser.parse_args()
    __UpperCAmelCase = strabool(args.class_cond)
    __UpperCAmelCase = os.path.basename(args.unet_path)
    print(F"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        __UpperCAmelCase = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __UpperCAmelCase = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        __UpperCAmelCase = TEST_UNET_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        __UpperCAmelCase = None
    # Convert the U-Net weights and load them into a diffusers model.
    __UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
    __UpperCAmelCase = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        __UpperCAmelCase = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        __UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        __UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    # Bundle model + scheduler into a pipeline and save it.
    __UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
    __UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 299
|
def A__ ( __lowerCamelCase ):
    """Convert an infix expression to postfix (RPN), printing each step.

    Args:
        __lowerCamelCase: infix expression as an iterable of single-character
            tokens; operands are letters/digits, operators are
            ``^ * / % + -`` plus parentheses.

    Returns:
        The postfix expression as a string.
    """
    infix = __lowerCamelCase
    # Bug fix: the original bound both the operator stack and the output
    # list to one throwaway local, so every later reference failed; it also
    # measured ``len(infix)`` where the stack depth was intended and looked
    # up the priority of "(" (a KeyError) when an operator followed an open
    # parenthesis.
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def A__ ( __lowerCamelCase ):
    """Convert an infix expression to prefix (Polish) notation.

    Classic trick: reverse the input, swap the parentheses, run the
    infix-to-postfix conversion, then reverse the result.
    """
    infix = list(__lowerCamelCase[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        # Bug fix: the original assigned the swapped bracket to a throwaway
        # local instead of writing it back into the list.
        if infix[i] == "(":
            infix[i] = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''('''  # change ")" to "("
    return (_infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


def _infix_2_postfix(infix ):
    """Shunting-yard infix-to-postfix conversion, printing each step.

    Private helper: the original body called the undefined name
    ``infix_2_postfix`` (all module functions were renamed to ``A__``).
    """
    stack = []
    post_fix = []
    priority = {'''^''': 3, '''*''': 2, '''/''': 2, '''%''': 2, '''+''': 1, '''-''': 1}
    print_width = len(infix ) if (len(infix ) > 7) else 7
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )
        elif x == "(":
            stack.append(x )
        elif x == ")":
            while stack[-1] != "(":
                post_fix.append(stack.pop() )
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )
            else:
                # Never pop past an open parenthesis.
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )
                stack.append(x )
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )
    while len(stack ) > 0:
        post_fix.append(stack.pop() )
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', )
    return "".join(post_fix )
if __name__ == "__main__":
    # NOTE(review): ``Infix`` and ``infix_2_prefix`` are not defined in this
    # file (the input is bound to ``__UpperCAmelCase`` and the converter is
    # named ``A__``) — confirm the intended names before running.
    __UpperCAmelCase = input("\nEnter an Infix Equation = ") # Input an Infix equation
    __UpperCAmelCase = "".join(Infix.split()) # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 1
|
from __future__ import annotations
# Four-connected grid moves used by the search routine below,
# expressed as [row-delta, column-delta] pairs.
__UpperCAmelCase = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
SCREAMING_SNAKE_CASE_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the reference grid
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the action grid
SCREAMING_SNAKE_CASE_ = init[0]
SCREAMING_SNAKE_CASE_ = init[1]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE_ = [[f, g, x, y]]
SCREAMING_SNAKE_CASE_ = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE_ = False # flag set if we can't find expand
while not found and not resign:
if len(__lowerCamelCase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE_ = cell.pop()
SCREAMING_SNAKE_CASE_ = next_cell[2]
SCREAMING_SNAKE_CASE_ = next_cell[3]
SCREAMING_SNAKE_CASE_ = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE_ = True
else:
for i in range(len(__lowerCamelCase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE_ = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE_ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE_ = g + cost
SCREAMING_SNAKE_CASE_ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = goal[0]
SCREAMING_SNAKE_CASE_ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE_ = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE_ = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE_ = xa
SCREAMING_SNAKE_CASE_ = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__lowerCamelCase ) ):
path.append(invpath[len(__lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
    # Demo: run the A* search on a small grid and print the action map and the
    # recovered path.
    # NOTE(review): every constant below is bound to the same obfuscated name
    # ``__UpperCAmelCase`` while later lines read ``grid``, ``goal``, ``cost``,
    # ``heuristic``, ``search``, ``action`` and ``path`` — none of which are
    # defined here (obfuscation artifact); this driver cannot run as written.
    __UpperCAmelCase = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    __UpperCAmelCase = [0, 0]
    # all coordinates are given in format [y,x]
    __UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
    __UpperCAmelCase = 1
    # the cost map which pushes the path closer to the goal
    __UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            # Manhattan distance to the goal cell.
            __UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __UpperCAmelCase = 99
    __UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 299
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """CLAP-style audio feature extractor.

    Converts raw mono audio into log-mel-spectrogram ``input_features`` plus an
    ``is_longer`` flag.  Clips longer than the configured maximum are handled
    via "fusion" (a shrunk global view stacked with three random crops) or
    random truncation; shorter clips via repeat / repeat-then-pad padding.

    NOTE(review): obfuscation artifacts make this block non-runnable as
    written — every method signature repeats the parameter name ``_A`` (a
    SyntaxError in Python), and every assignment target was replaced by the
    single placeholder ``SCREAMING_SNAKE_CASE_``, so the ``self.*`` attributes
    and locals read later (``output``, ``mel``, ``input_mel``, ...) are never
    bound.  The comments below describe the apparent intent; confirm against
    the upstream ClapFeatureExtractor before relying on any of them.
    """

    # Names of the model-input tensors produced by __call__.
    UpperCAmelCase_ =["input_features", "is_longer"]

    def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
        # Apparent parameters (by position, per upstream): feature_size,
        # sampling_rate, hop_length, max_length_s, fft_window_size,
        # padding_value, return_attention_mask, frequency_min, frequency_max,
        # top_db, truncation, padding — TODO confirm.
        super().__init__(
            feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
        SCREAMING_SNAKE_CASE_ = top_db
        SCREAMING_SNAKE_CASE_ = truncation
        SCREAMING_SNAKE_CASE_ = padding
        SCREAMING_SNAKE_CASE_ = fft_window_size
        # Number of frequency bins of a real FFT of this window size.
        SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
        SCREAMING_SNAKE_CASE_ = hop_length
        SCREAMING_SNAKE_CASE_ = max_length_s
        SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
        SCREAMING_SNAKE_CASE_ = sampling_rate
        SCREAMING_SNAKE_CASE_ = frequency_min
        SCREAMING_SNAKE_CASE_ = frequency_max
        # Two mel filter banks: HTK-scaled (used for "fusion") and
        # Slaney-scaled (used for the non-fusion path).
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
        SCREAMING_SNAKE_CASE_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )

    def _UpperCamelCase ( self ) -> Dict[str, Any]:
        # Serialize the config to a dict, dropping the large mel filter-bank
        # arrays so the result stays JSON-friendly.
        SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
        # Compute a dB-scaled log-mel spectrogram of one waveform, transposed
        # to (time, mel) layout.
        SCREAMING_SNAKE_CASE_ = spectrogram(
            _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
        return log_mel_spectrogram.T

    def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
        # "Fusion" helper: pick one random chunk from the front, middle and
        # back thirds of the mel spectrogram, plus a bilinearly shrunk global
        # view, and stack all four along a new leading axis.
        SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            SCREAMING_SNAKE_CASE_ = [0]
        # randomly choose index for each part
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
        SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
        SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
        SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
        SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
            _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
        SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
        SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
        # Turn one waveform into mel features, truncating ("rand_trunc" or
        # "fusion") when too long and repeating/padding when too short.
        # Returns (input_mel, longer) where ``longer`` flags a truncated clip.
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                SCREAMING_SNAKE_CASE_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
                SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
                SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                SCREAMING_SNAKE_CASE_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    SCREAMING_SNAKE_CASE_ = False
                else:
                    SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
                    SCREAMING_SNAKE_CASE_ = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            SCREAMING_SNAKE_CASE_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
                    SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
                SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
        # Main entry point: validate the sampling rate, normalise the raw
        # speech into a batch of float32 arrays, extract mel features per
        # waveform, and wrap everything in a BatchFeature.
        SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
        SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
            isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(_A , np.ndarray ):
            SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
        elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
        # convert to mel spectrogram, truncate and pad if needed.
        SCREAMING_SNAKE_CASE_ = [
            self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
            for waveform in raw_speech
        ]
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = []
        for mel, longer in padded_inputs:
            input_mel.append(_A )
            is_longer.append(_A )
        if truncation == "fusion" and sum(_A ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
            SCREAMING_SNAKE_CASE_ = True
        if isinstance(input_mel[0] , _A ):
            SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
        SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
        SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
        return input_features
| 299
| 1
|
import math
def A__ ( ):
    """Interactive transposition-cipher driver.

    Prompts for a message, a numeric key and a mode ('e'/'d'), then prints
    the transformed text followed by ``|`` so trailing spaces stay visible.

    NOTE(review): reconstructed from obfuscated source in which all locals
    were collapsed to one placeholder name.  ``encrypt_message`` /
    ``decrypt_message`` are not defined under those names in this file (the
    helpers below were renamed ``A__``) — confirm the intended targets.
    """
    message = input('''Enter message: ''' )
    key = int(input(F'''Enter key [2-{len(message ) - 1}]: ''' ) )
    mode = input('''Encryption/Decryption [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        text = encrypt_message(key, message )
    elif mode.lower().startswith('''d''' ):
        text = decrypt_message(key, message )
    else:
        # Fail loudly instead of hitting a NameError on ``text`` below.
        raise ValueError('''Mode must start with "e" or "d".''' )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F'''Output:\n{text + "|"}''' )
def A__ ( key, message ):
    """Encrypt ``message`` with a columnar transposition of width ``key``.

    Characters at indices ``col, col+key, col+2*key, ...`` form column ``col``;
    the columns are concatenated to produce the cipher text.

    NOTE(review): reconstructed from obfuscated source in which both
    parameters shared one name (a SyntaxError) and the locals read below
    (``cipher_text``, ``pointer``) were never bound.
    """
    cipher_text = [''''''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def A__ ( key, message ):
    """Decrypt a columnar-transposition cipher text produced with ``key``.

    Rebuilds the ``num_rows x num_cols`` grid used at encryption time,
    accounting for the ``num_shaded_boxes`` unused cells in the last column.

    NOTE(review): reconstructed from obfuscated source in which both
    parameters shared one name (a SyntaxError) and the locals read below
    (``plain_text``, ``col``, ``row``) were never bound.
    """
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''''''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row, skipping the shaded (unused) cell in the last
        # column when the grid is not completely filled.
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
    # Run the module doctests, then start the interactive driver.
    # NOTE(review): ``main`` is not defined under that name in this file (the
    # driver above was renamed ``A__`` by the obfuscator) — confirm the target.
    import doctest

    doctest.testmod()
    main()
| 299
|
import math
import random
def A__ ( value, deriv = False ):
    """Logistic sigmoid, or its derivative when ``deriv`` is true.

    When ``deriv`` is true, ``value`` is assumed to already be a sigmoid
    activation, so the derivative is ``value * (1 - value)``.

    NOTE(review): the original signature repeated the same parameter name
    twice (a SyntaxError, an obfuscation artifact); the positional interface
    is unchanged.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02  # fixed input scale, also used as the learning rate below
__UpperCAmelCase = INITIAL_VALUE  # keep the original (obfuscated) module binding alive


def A__ ( expected, number_propagations ):
    """Train a single weight by gradient descent toward ``expected``.

    Args:
        expected: target value in [0, 100].
        number_propagations: number of forward/backward iterations.

    Returns:
        The final prediction scaled back to the 0-100 range.

    NOTE(review): reconstructed from obfuscated source in which both
    parameters shared one name (a SyntaxError) and the locals were never
    bound.  ``sigmoid_function`` is not defined under that name in this file
    (the sigmoid above was renamed ``A__``) — confirm the intended target.
    """
    weight = float(2 * (random.randint(1, 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 1_00
if __name__ == "__main__":
    # Run the doctests, then train toward a user-supplied target.
    # NOTE(review): the inputs are bound to ``__UpperCAmelCase`` but the print
    # line reads ``forward_propagation``, ``expected`` and
    # ``number_propagations`` — none defined here (obfuscation artifact), so
    # this block raises NameError as written.
    import doctest

    doctest.testmod()
    __UpperCAmelCase = int(input("Expected value: "))
    __UpperCAmelCase = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Output container for the Flax UNet defined below.

    NOTE(review): the single field was mangled to ``UpperCAmelCase_ =42`` —
    upstream this is an annotated array field holding the predicted sample
    (e.g. ``sample: jnp.ndarray``); confirm before use.
    """

    UpperCAmelCase_ =42
@flax_register_to_config
class UpperCamelCase__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """Conditional 2-D UNet in Flax.

    A timestep- and text-conditioned denoiser built from cross-attention down
    blocks, a cross-attention mid block and up blocks, with a GroupNorm +
    SiLU + conv output head.

    NOTE(review): obfuscation collapsed the distinct class-attribute names
    below to ``UpperCAmelCase_`` and most assignment targets to
    ``SCREAMING_SNAKE_CASE_``; names read later (``down_blocks``,
    ``up_blocks``, ``sample`` in ``__call__``, the ``self.*`` submodules built
    in the setup method, ...) are therefore never bound and the block does not
    run as written.  Comments describe the apparent upstream intent
    (FlaxUNet2DConditionModel); confirm before relying on them.
    """

    # Apparent config fields (by position, per upstream): sample_size,
    # in_channels, out_channels, down_block_types, up_block_types,
    # only_cross_attention, block_out_channels, layers_per_block,
    # attention_head_dim, num_attention_heads, cross_attention_dim, dropout,
    # use_linear_projection, dtype, flip_sin_to_cos, freq_shift,
    # use_memory_efficient_attention — TODO confirm.
    UpperCAmelCase_ =32
    UpperCAmelCase_ =4
    UpperCAmelCase_ =4
    UpperCAmelCase_ =(
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    UpperCAmelCase_ =("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    UpperCAmelCase_ =False
    UpperCAmelCase_ =(320, 640, 1_280, 1_280)
    UpperCAmelCase_ =2
    UpperCAmelCase_ =8
    UpperCAmelCase_ =None
    UpperCAmelCase_ =1_280
    UpperCAmelCase_ =0.0
    UpperCAmelCase_ =False
    UpperCAmelCase_ =jnp.floataa
    UpperCAmelCase_ =True
    UpperCAmelCase_ =0
    UpperCAmelCase_ =False

    def _UpperCamelCase ( self , _A ) -> FrozenDict:
        # Build zeroed sample / timestep / encoder-hidden-state tensors and
        # initialise the module parameters from the given PRNG key.
        # init input tensors
        SCREAMING_SNAKE_CASE_ = (1, self.in_channels, self.sample_size, self.sample_size)
        SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
        SCREAMING_SNAKE_CASE_ = jnp.ones((1,) , dtype=jnp.intaa )
        SCREAMING_SNAKE_CASE_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = jax.random.split(_A )
        SCREAMING_SNAKE_CASE_ = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(_A , _A , _A , _A )["params"]

    def _UpperCamelCase ( self ) -> List[str]:
        # Flax setup: instantiate conv-in, time projection/embedding, the
        # down / mid / up block stacks, and the output head.
        SCREAMING_SNAKE_CASE_ = self.block_out_channels
        SCREAMING_SNAKE_CASE_ = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        SCREAMING_SNAKE_CASE_ = self.num_attention_heads or self.attention_head_dim
        # input
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        SCREAMING_SNAKE_CASE_ = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        SCREAMING_SNAKE_CASE_ = FlaxTimestepEmbedding(_A , dtype=self.dtype )
        SCREAMING_SNAKE_CASE_ = self.only_cross_attention
        # Broadcast scalar settings to one entry per down block.
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(_A , _A ):
            SCREAMING_SNAKE_CASE_ = (num_attention_heads,) * len(self.down_block_types )
        # down
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            SCREAMING_SNAKE_CASE_ = output_channel
            SCREAMING_SNAKE_CASE_ = block_out_channels[i]
            SCREAMING_SNAKE_CASE_ = i == len(_A ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                SCREAMING_SNAKE_CASE_ = FlaxCrossAttnDownBlockaD(
                    in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                SCREAMING_SNAKE_CASE_ = FlaxDownBlockaD(
                    in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(_A )
        SCREAMING_SNAKE_CASE_ = down_blocks
        # mid
        SCREAMING_SNAKE_CASE_ = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        SCREAMING_SNAKE_CASE_ = []
        SCREAMING_SNAKE_CASE_ = list(reversed(_A ) )
        SCREAMING_SNAKE_CASE_ = list(reversed(_A ) )
        SCREAMING_SNAKE_CASE_ = list(reversed(_A ) )
        SCREAMING_SNAKE_CASE_ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            SCREAMING_SNAKE_CASE_ = output_channel
            SCREAMING_SNAKE_CASE_ = reversed_block_out_channels[i]
            SCREAMING_SNAKE_CASE_ = reversed_block_out_channels[min(i + 1 , len(_A ) - 1 )]
            SCREAMING_SNAKE_CASE_ = i == len(_A ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                SCREAMING_SNAKE_CASE_ = FlaxCrossAttnUpBlockaD(
                    in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                SCREAMING_SNAKE_CASE_ = FlaxUpBlockaD(
                    in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(_A )
            SCREAMING_SNAKE_CASE_ = output_channel
        SCREAMING_SNAKE_CASE_ = up_blocks
        # out
        SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        SCREAMING_SNAKE_CASE_ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , _A , _A , _A , _A=None , _A=None , _A = True , _A = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        # Denoising forward pass: embed the timestep, run the down blocks
        # (collecting residuals, optionally adding ControlNet residuals), the
        # mid block, then the up blocks, and project to the output channels.
        # 1. time
        if not isinstance(_A , jnp.ndarray ):
            SCREAMING_SNAKE_CASE_ = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
            SCREAMING_SNAKE_CASE_ = timesteps.astype(dtype=jnp.floataa )
            SCREAMING_SNAKE_CASE_ = jnp.expand_dims(_A , 0 )
        SCREAMING_SNAKE_CASE_ = self.time_proj(_A )
        SCREAMING_SNAKE_CASE_ = self.time_embedding(_A )
        # 2. pre-process
        SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
        SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
        # 3. down
        SCREAMING_SNAKE_CASE_ = (sample,)
        for down_block in self.down_blocks:
            if isinstance(_A , _A ):
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , _A , deterministic=not train )
            else:
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            SCREAMING_SNAKE_CASE_ = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                _A , _A ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            SCREAMING_SNAKE_CASE_ = new_down_block_res_samples
        # 4. mid
        SCREAMING_SNAKE_CASE_ = self.mid_block(_A , _A , _A , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            SCREAMING_SNAKE_CASE_ = down_block_res_samples[-(self.layers_per_block + 1) :]
            SCREAMING_SNAKE_CASE_ = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(_A , _A ):
                SCREAMING_SNAKE_CASE_ = up_block(
                    _A , temb=_A , encoder_hidden_states=_A , res_hidden_states_tuple=_A , deterministic=not train , )
            else:
                SCREAMING_SNAKE_CASE_ = up_block(_A , temb=_A , res_hidden_states_tuple=_A , deterministic=not train )
        # 6. post-process
        SCREAMING_SNAKE_CASE_ = self.conv_norm_out(_A )
        SCREAMING_SNAKE_CASE_ = nn.silu(_A )
        SCREAMING_SNAKE_CASE_ = self.conv_out(_A )
        SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=_A )
| 299
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# Emit a deprecation warning once at import time: this shim only re-exports
# the pipeline symbols above and is scheduled for removal in diffusers 0.22.0.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 299
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__SCREAMING_SNAKE_CASE ) , "Tatoeba directory does not exist." )
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration tests for the Marian Tatoeba model converter.

    NOTE(review): obfuscation artifacts — the skip condition references the
    placeholder ``__SCREAMING_SNAKE_CASE`` (upstream: ``DEFAULT_REPO``), the
    first fixture assigns its temp dir to ``SCREAMING_SNAKE_CASE_`` but passes
    an undefined ``_A`` to ``TatoebaConverter``, and all three methods share
    the name ``_UpperCamelCase`` so later defs shadow earlier ones.  Confirm
    against the upstream TatoebaConversionTester before relying on this.
    """

    @cached_property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        # Build a converter writing into a fresh temporary directory.
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=_A )

    @slow
    def _UpperCamelCase ( self ) -> int:
        # Smoke-test conversion of the Hebrew->English model.
        self.resolver.convert_models(['''heb-eng'''] )

    @slow
    def _UpperCamelCase ( self ) -> Dict:
        # Dry-run model-card generation and check the language-pair metadata.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A )
        assert mmeta["long_pair"] == "heb-eng"
| 299
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """T5-style encoder over note tokens (spectrogram diffusion).

    Embeds input tokens plus positions, runs a stack of T5 blocks, and
    returns the layer-normalised, dropout-applied encodings together with the
    input mask.

    NOTE(review): obfuscation artifacts — the ``__init__`` signature repeats
    the parameter ``_A`` (a SyntaxError) and all ``self.*`` assignment targets
    were collapsed to ``SCREAMING_SNAKE_CASE_``, so the submodules read in the
    forward method (``self.token_embedder``, ``self.encoders``, ...) are never
    bound.  Confirm against the upstream SpectrogramNotesEncoder.
    """

    @register_to_config
    def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A = False , ) -> List[str]:
        # Apparent parameters (per upstream): max_length, vocab_size, d_model,
        # dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj,
        # is_decoder — TODO confirm.
        super().__init__()
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
        SCREAMING_SNAKE_CASE_ = TaConfig(
            vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
        SCREAMING_SNAKE_CASE_ = nn.ModuleList()
        for lyr_num in range(_A ):
            SCREAMING_SNAKE_CASE_ = TaBlock(_A )
            self.encoders.append(_A )
        SCREAMING_SNAKE_CASE_ = TaLayerNorm(_A )
        SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )

    def _UpperCamelCase ( self , _A , _A ) -> Optional[Any]:
        # Forward pass: token + position embeddings, pre-dropout, extended
        # attention mask, T5 blocks, final layer norm and post-dropout.
        SCREAMING_SNAKE_CASE_ = self.token_embedder(_A )
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.shape[1]
        SCREAMING_SNAKE_CASE_ = torch.arange(_A , device=encoder_input_tokens.device )
        x += self.position_encoding(_A )
        SCREAMING_SNAKE_CASE_ = self.dropout_pre(_A )
        # inverted the attention mask
        SCREAMING_SNAKE_CASE_ = encoder_input_tokens.size()
        SCREAMING_SNAKE_CASE_ = self.get_extended_attention_mask(_A , _A )
        for lyr in self.encoders:
            SCREAMING_SNAKE_CASE_ = lyr(_A , _A )[0]
        SCREAMING_SNAKE_CASE_ = self.layer_norm(_A )
        return self.dropout_post(_A ), encoder_inputs_mask
| 299
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def A__ ( __lowerCamelCase ):
    """Return the canonical signature of a word: its letters in sorted order."""
    ordered_letters = sorted(__lowerCamelCase )
    return "".join(ordered_letters )
def A__ ( __lowerCamelCase ):
    """Return every dictionary word sharing this word's letter signature."""
    word_key = signature(__lowerCamelCase )
    return word_by_signature[word_key]
# Build the anagram index at import time from the bundled word list, then (as a
# script) dump every multi-word anagram group to anagrams.txt.
# NOTE(review): each result is bound to ``__UpperCAmelCase`` while later lines
# read ``data``, ``word_list``, ``word_by_signature``, ``signature``,
# ``anagram`` and ``all_anagrams`` — none defined under those names here
# (obfuscation artifact); this block raises NameError as written.
__UpperCAmelCase = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__UpperCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
__UpperCAmelCase = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    __UpperCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 299
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
    """Wav2Vec2 processor: bundles a feature extractor and a CTC tokenizer.

    ``__call__`` routes audio to the feature extractor and text to the
    tokenizer; ``pad`` does the same for padding.  A deprecated
    ``as_target_processor`` context manager temporarily swaps the active
    processor to the tokenizer.

    NOTE(review): obfuscation artifacts — signatures combine ``*_A`` and
    ``**_A`` (duplicate parameter name, a SyntaxError), and assignment targets
    were collapsed to ``SCREAMING_SNAKE_CASE_``, so locals read later
    (``kwargs``, ``audio``, ``text``, ``inputs``, ``encodings``,
    ``input_features``, ``labels``) and the ``self.*`` state set in
    ``__init__`` are never bound.  Confirm against the upstream
    Wav2Vec2Processor before relying on the behaviour described here.
    """

    UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
    UpperCAmelCase_ ="AutoTokenizer"

    def __init__( self , _A , _A ) -> Dict:
        # Store both components and start outside the target-processor context.
        super().__init__(_A , _A )
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False

    @classmethod
    def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
        # Load via the parent class; fall back to loading the feature extractor
        # and tokenizer separately for old configs without `tokenizer_class`.
        try:
            return super().from_pretrained(_A , **_A )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                ''' include a `tokenizer_class` attribute is deprecated and will be '''
                '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
                ''' attribute to either your `config.json` or `tokenizer_config.json` '''
                '''file to suppress this warning: ''' , _A , )
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
            SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
            return cls(feature_extractor=_A , tokenizer=_A )

    def __call__( self , *_A , **_A ) -> Any:
        # Dispatch audio to the feature extractor and/or text to the tokenizer;
        # when both are given, attach the token ids as "labels".
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_A , **_A )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
        if text is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
            return inputs

    def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
        # Pad audio features and/or label ids, mirroring __call__'s dispatch.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_A , **_A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
        SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
        if len(_A ) > 0:
            SCREAMING_SNAKE_CASE_ = args[0]
            SCREAMING_SNAKE_CASE_ = args[1:]
        if input_features is not None:
            SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
        if labels is not None:
            SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
            return input_features

    def _UpperCamelCase ( self , *_A , **_A ) -> Any:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*_A , **_A )

    def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
        # Delegate decoding to the tokenizer.
        return self.tokenizer.decode(*_A , **_A )

    @contextmanager
    def _UpperCamelCase ( self ) -> Optional[int]:
        # Deprecated: temporarily make the tokenizer the active processor so
        # plain calls process labels; restored on exit.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE_ = self.feature_extractor
        SCREAMING_SNAKE_CASE_ = False
| 299
| 1
|
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1
__UpperCAmelCase = UNIVERSAL_GAS_CONSTANT  # keep the original (obfuscated) module binding alive


def A__ ( moles, kelvin, volume ):
    """Return the pressure of an ideal gas via PV = nRT.

    Raises:
        ValueError: if any input is negative.

    NOTE(review): reconstructed from obfuscated source in which all three
    parameters shared one name (a SyntaxError) and the gas constant was bound
    only as ``__UpperCAmelCase``; both functions in this module were renamed
    ``A__`` by the obfuscator, so this definition is shadowed by the next one.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def A__ ( moles, kelvin, pressure ):
    """Return the volume of an ideal gas via V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    from doctest import testmod

    testmod()
| 299
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack client used to post doc-test reports; requires CI_SLACK_BOT_TOKEN in
# the environment (raises KeyError at import time when it is unset).
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def A__ ( __lowerCamelCase ):
    """Parse a pytest summary line into counts and runtime.

    Args:
        __lowerCamelCase: a line such as ``"2 failed, 3 passed in 4.5s"``.

    Returns:
        ``(failed, success, time_spent)`` where the counts are ints and
        ``time_spent`` is the runtime token.

    NOTE(review): the original enumerated the raw string (so the per-token
    "failed"/"passed" checks never matched single characters) and read an
    unbound ``expressions`` local — obfuscation artifacts fixed here.
    """
    expressions = __lowerCamelCase.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def A__ ( __lowerCamelCase ):
    """Map each failing doctest module to the first line of its error.

    Scans pytest's short-failure output: a ``_ [doctest]`` header line names
    the failing module; the next non-numeric line is recorded as its error.

    NOTE(review): the original read unbound locals (``failures``,
    ``in_error``) left over from obfuscation — fixed here.
    """
    failures = {}
    failure = None
    in_error = False
    for line in __lowerCamelCase.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''', line ):
            in_error = True
            # The module path is the third space-separated token of the header.
            failure = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[failure] = line
            in_error = False
    return failures
class UpperCamelCase__ :
"""simple docstring"""
    def __init__( self , _A , _A ) -> Dict:
        # Binds the report title and unpacks the doc-test results dict
        # (keys: "time_spent", "success", "failures", plus per-category data).
        # NOTE(review): both parameters are named ``_A`` (a SyntaxError) and
        # the intended ``self.*`` targets were mangled to
        # ``SCREAMING_SNAKE_CASE_``, so ``title``/``doc_test_results`` are
        # unbound and no attribute read by the properties below is ever set —
        # confirm against the upstream doc-test Slack reporter.
        SCREAMING_SNAKE_CASE_ = title
        SCREAMING_SNAKE_CASE_ = doc_test_results['''time_spent'''].split(''',''' )[0]
        SCREAMING_SNAKE_CASE_ = doc_test_results['''success''']
        SCREAMING_SNAKE_CASE_ = doc_test_results['''failures''']
        SCREAMING_SNAKE_CASE_ = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE_ = doc_test_results
@property
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = [self._time_spent]
SCREAMING_SNAKE_CASE_ = 0
for time in time_spent:
SCREAMING_SNAKE_CASE_ = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_A ) == 1:
SCREAMING_SNAKE_CASE_ = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'''{int(_A )}h{int(_A )}m{int(_A )}s'''
@property
def _UpperCamelCase ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCamelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCamelCase ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = 40
SCREAMING_SNAKE_CASE_ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
SCREAMING_SNAKE_CASE_ = ''''''
for category, failures in category_failures.items():
if len(_A ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_A )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_A )
@staticmethod
def _UpperCamelCase ( ) -> Any:
SCREAMING_SNAKE_CASE_ = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
def _UpperCamelCase ( self ) -> Optional[int]:
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
SCREAMING_SNAKE_CASE_ = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
SCREAMING_SNAKE_CASE_ = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )
def _UpperCamelCase ( self , _A , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = ''''''
for key, value in failures.items():
SCREAMING_SNAKE_CASE_ = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
SCREAMING_SNAKE_CASE_ = job_name
SCREAMING_SNAKE_CASE_ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
SCREAMING_SNAKE_CASE_ = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCamelCase ( self ) -> int:
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
SCREAMING_SNAKE_CASE_ = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
SCREAMING_SNAKE_CASE_ = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
SCREAMING_SNAKE_CASE_ = F'''*Num failures* :{len(job_result["failed"] )} \n'''
SCREAMING_SNAKE_CASE_ = job_result['''failures''']
SCREAMING_SNAKE_CASE_ = self.get_reply_blocks(_A , _A , _A , text=_A )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def A__():
    """Fetch ``{job name: job html_url}`` for every job of the current
    GitHub Actions run.

    Reads the run id from the ``GITHUB_RUN_ID`` environment variable and
    pages through the jobs API 100 at a time.  Best-effort: returns the jobs
    collected so far — possibly ``{}`` — if anything goes wrong.

    NOTE(review): reconstructed — the obfuscated original never bound
    ``jobs``/``url``/``result``, so it raised NameError.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request already returned up to 100 jobs; fetch the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def A__(__lowerCamelCase):
    """Read every file in directory ``__lowerCamelCase`` and return
    ``{file name without extension: file content}``.

    Returns ``{}`` when the directory does not exist.

    Raises:
        ValueError: (chained from UnicodeDecodeError) if a file is not valid
            UTF-8 text.

    NOTE(review): reconstructed — the obfuscated original never bound
    ``_artifact`` and discarded each file's content, so it raised NameError.
    """
    _artifact = {}
    if os.path.exists(__lowerCamelCase):
        for file in os.listdir(__lowerCamelCase):
            try:
                with open(os.path.join(__lowerCamelCase, file), encoding="utf-8") as f:
                    # Key on the stem so "stats.txt" is looked up as "stats".
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(__lowerCamelCase, file)}.") from e
    return _artifact
def A__():
    """Scan the current working directory and wrap every sub-directory in an
    Artifact record.

    Returns:
        dict of {directory name: Artifact}; each Artifact exposes ``.name``
        and ``.paths`` (a list of ``{"name": ..., "path": ...}`` dicts — the
        shape read by the report code in this file).

    NOTE(review): reconstructed — the obfuscated original referenced an
    undefined ``Artifact`` name and never bound ``_available_artifacts``.
    """

    class Artifact:
        """A named artifact plus the filesystem paths it was found at."""

        def __init__(self, name) -> None:
            self.name = name
            self.paths = []

        def __str__(self) -> str:
            return self.name

        def add_path(self, path) -> None:
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    for directory in filter(os.path.isdir, os.listdir()):
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(artifact_name)
    return _available_artifacts
if __name__ == "__main__":
    # Entry point: collect doc-test artifacts, bucket failures per example
    # category, and post the Slack report.
    # NOTE(review): the helper names used below (get_job_links,
    # retrieve_available_artifacts, handle_test_results,
    # extract_first_line_failure, retrieve_artifact, Message) do not match
    # the obfuscated definitions above (all named `A__` / `UpperCamelCase__`),
    # and each `__UpperCAmelCase = ...` clobbers the previous one, so this
    # script cannot run as written — confirm against the original source.
    __UpperCAmelCase = get_job_links()
    __UpperCAmelCase = retrieve_available_artifacts()
    __UpperCAmelCase = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    __UpperCAmelCase = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    __UpperCAmelCase = github_actions_job_links.get("run_doctests")
    __UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    __UpperCAmelCase = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
        __UpperCAmelCase = failed
        __UpperCAmelCase = success
        __UpperCAmelCase = time_spent[1:-1] + ", "
        __UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                __UpperCAmelCase = line.replace("FAILED ", "")
                __UpperCAmelCase = line.split()[0].replace("\n", "")
                if "::" in line:
                    __UpperCAmelCase , __UpperCAmelCase = line.split("::")
                else:
                    __UpperCAmelCase , __UpperCAmelCase = line, line
                # Attribute the failed test to its doc-example category by
                # matching the file path against the glob patterns above.
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        __UpperCAmelCase = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        __UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
                        __UpperCAmelCase = failure
                        break
    __UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( DPTImageProcessor ):
    """Deprecated alias of ``DPTImageProcessor`` (originally
    ``DPTFeatureExtractor``).

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like ``DPTImageProcessor``.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # NOTE(review): the obfuscated original declared *args and **kwargs
        # under the same name (a SyntaxError), inherited from an undefined
        # `__SCREAMING_SNAKE_CASE` base, and passed an undefined name as the
        # warning category; restored to the conventional deprecation shim.
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 299
|
from __future__ import annotations
# Movement deltas, indexed by the action codes stored in the action grid.
# NOTE(review): the obfuscated file assigned this list to `__UpperCAmelCase`
# while the search code reads `DIRECTIONS`; the name is restored here.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def A__(grid, init, goal, cost, heuristic):
    """Heuristic-guided grid path search from `init` to `goal`.

    Args:
        grid: 2-D list; 0 = free cell, 1 = obstacle.
        init: [row, col] start cell.
        goal: [row, col] target cell.
        cost: uniform cost of one step.
        heuristic: 2-D list of per-cell heuristic values, same shape as grid.

    Returns:
        (path, action): `path` is the list of [row, col] cells from init to
        goal; `action` is the grid of DIRECTIONS indices used to enter each
        cell.

    Raises:
        ValueError: when the frontier empties before reaching the goal.

    NOTE(review): reconstructed — the obfuscated original bound every local
    to one throwaway name; variable names restored from usage sites.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid of already-expanded cells
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        # to choose the least costliest action so as to move closer to the goal
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x = next_cell[2]
        y = next_cell[3]
        g = next_cell[1]
        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(DIRECTIONS)):  # to try out different valid actions
                xa = x + DIRECTIONS[i][0]
                ya = y + DIRECTIONS[i][1]
                if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                    if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                        ga = g + cost
                        fa = ga + heuristic[xa][ya]
                        cell.append([fa, ga, xa, ya])
                        closed[xa][ya] = 1
                        action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Demo: search a 5x6 grid (1 = obstacle) from top-left to bottom-right.
    # NOTE(review): each `__UpperCAmelCase = ...` clobbers the previous
    # binding and the call below uses the original name `search`, which does
    # not match the obfuscated `A__` definition above — this demo cannot run
    # as written; confirm against the original source.
    __UpperCAmelCase = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    __UpperCAmelCase = [0, 0]
    # all coordinates are given in format [y,x]
    __UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
    __UpperCAmelCase = 1
    # the cost map which pushes the path closer to the goal
    __UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            __UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __UpperCAmelCase = 99
    __UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 299
| 1
|
def A__(dist, v):
    """Pretty-print the `v` x `v` shortest-path matrix, one row per line;
    pairs still at infinity print as "INF".

    NOTE(review): the obfuscated original declared both parameters under one
    name (a SyntaxError) and read `dist`/`n` without binding them; parameter
    names restored from usage sites.
    """
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def A__(graph, v):
    """Floyd-Warshall all-pairs shortest paths.

    Args:
        graph: v x v adjacency matrix; graph[i][j] is the edge weight from
            vertex i to vertex j, float("inf") when there is no edge.
        v: number of vertices.

    Returns:
        (dist, v): the shortest-distance matrix and the vertex count.

    Side effect: prints the resulting matrix ("INF" for unreachable pairs).

    NOTE(review): reconstructed — the obfuscated original declared both
    parameters under one name and never bound `dist`; its call to the
    printing helper (whose mangled name no longer resolves) is inlined here.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    # Print the result matrix (same output format as the helper above).
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read vertex/edge counts and edges from stdin, then
    # run Floyd-Warshall.
    # NOTE(review): every `__UpperCAmelCase = ...` clobbers the previous
    # binding, so `v`, `e`, `graph`, `src`, `dst`, `weight` used below are
    # never actually defined, and `floyd_warshall` does not match the
    # obfuscated `A__` definition above — confirm against the original.
    __UpperCAmelCase = int(input("Enter number of vertices: "))
    __UpperCAmelCase = int(input("Enter number of edges: "))
    __UpperCAmelCase = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        __UpperCAmelCase = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        __UpperCAmelCase = int(input("Enter source:"))
        __UpperCAmelCase = int(input("Enter destination:"))
        __UpperCAmelCase = float(input("Enter weight:"))
        __UpperCAmelCase = weight
    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 299
|
from __future__ import annotations
from collections.abc import Callable
# Type alias for a (possibly augmented) numeric matrix.  NOTE(review): the
# alias name was mangled by the obfuscation; later `__UpperCAmelCase`
# assignments in this file clobber it.
__UpperCAmelCase = list[list[float | int]]
def A__(matrix, vector):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian
    elimination with partial pivoting.

    Args:
        matrix: n x n coefficient matrix (list of rows).
        vector: n x 1 right-hand side (list of single-element rows).

    Returns:
        The n x 1 solution as a list of single-element rows; each entry is
        rounded to 10 decimal places to get rid of numbers like
        2.000000000000004.

    NOTE(review): reconstructed — the obfuscated original declared both
    parameters under one name (a SyntaxError) and never bound `augmented`.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: bring the row with the largest |entry| in this column up.
        pivot_row = max(
            (abs(augmented[rowa][col]), rowa) for rowa in range(row, size)
        )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def A__(__lowerCamelCase):
    """Return a polynomial function interpolating the points
    (1, y[0]), (2, y[1]), ... for the given sequence of y-values.

    Builds the Vandermonde system and solves it with the Gaussian-elimination
    solver defined above.  NOTE(review): that solver's name was mangled to
    `A__` by the obfuscation; the call below uses the apparent original name
    `solve` — confirm the binding when restoring this file.
    """
    size = len(__lowerCamelCase)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(__lowerCamelCase):
        for col in range(size):
            # Abscissas are 1-based: row x_val corresponds to x = x_val + 1.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        # round() mirrors the solver's 10-decimal rounding of coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def A__(__lowerCamelCase):
    """Evaluate the generating polynomial
    u(n) = 1 - n + n^2 - n^3 + ... + n^10, i.e. the sum of (-n)^k for
    k = 0..10.

    NOTE(review): the obfuscated original read an undefined name `variable`
    instead of its parameter; the polynomial itself is unchanged.
    """
    return sum((-__lowerCamelCase) ** power for power in range(11))
def A__ ( __lowerCamelCase = question_function, __lowerCamelCase = 10 ):
    # Intended: Project Euler 101 — for each optimum polynomial fitted to a
    # prefix of the sequence, add its first incorrect term (FIT).
    # NOTE(review): this cannot run as written — `question_function` in the
    # default and `interpolate` in the body are undefined (both were mangled
    # to `A__` above), the two parameters share one name (a SyntaxError),
    # and the body reads `func`, `order`, `data_points`, `polynomials`,
    # `poly`, `x_val`, `ret`, none of which the obfuscated assignments bind.
    SCREAMING_SNAKE_CASE_ = [func(__lowerCamelCase ) for x_val in range(1, order + 1 )]
    SCREAMING_SNAKE_CASE_ = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
    ]
    SCREAMING_SNAKE_CASE_ = 0
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with the
        # true generating function, then accumulate that term.
        SCREAMING_SNAKE_CASE_ = 1
        while func(__lowerCamelCase ) == poly(__lowerCamelCase ):
            x_val += 1
        ret += poly(__lowerCamelCase )
    return ret


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 299
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    """Processor tests for CLIPSegProcessor: save/load round-trips, parity of
    the wrapped tokenizer and image processor, text/image/visual-prompt
    inputs, and batch decoding.

    NOTE(review): this file appears machine-obfuscated — most
    ``self.<attr> = ...`` / local bindings were replaced with a throwaway
    ``SCREAMING_SNAKE_CASE_`` local, so later reads such as
    ``self.tmpdirname``, ``processor`` or ``inputs`` reference names that
    are never bound (and ``np.uinta`` looks like a mangled ``np.uint8``).
    The "Intended:" comments describe the apparent original behavior;
    confirm against the original test file.
    """

    def _UpperCamelCase ( self ) -> List[Any]:
        # Intended: setUp — write a tiny BPE vocab/merges pair and an image
        # processor config into a fresh temp dir.
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        # fmt: off
        SCREAMING_SNAKE_CASE_ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A ) ) ) )
        SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_A ) )
        SCREAMING_SNAKE_CASE_ = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
            '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , _A )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(_A , _A )

    def _UpperCamelCase ( self , **_A ) -> int:
        # Intended: get_tokenizer — slow tokenizer from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A )

    def _UpperCamelCase ( self , **_A ) -> Dict:
        # Intended: get_rust_tokenizer — fast tokenizer from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A )

    def _UpperCamelCase ( self , **_A ) -> Optional[int]:
        # Intended: get_image_processor — from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        # Intended: tearDown — remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intended: prepare_image_inputs — one random 30x400 RGB PIL image.
        SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _UpperCamelCase ( self ) -> int:
        # Intended: save/load round-trip with default features — slow and
        # fast processors must agree with each other and the originals.
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        processor_slow.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        processor_fast.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , _A )
        self.assertIsInstance(processor_fast.tokenizer , _A )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , _A )
        self.assertIsInstance(processor_fast.image_processor , _A )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intended: save/load with additional kwargs (new special tokens,
        # image-processor overrides) survives the round-trip.
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def _UpperCamelCase ( self ) -> Any:
        # Intended: image features via the processor equal those from the
        # image processor alone (within 1e-2).
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = image_processor(_A , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE_ = processor(images=_A , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intended: text encoding via the processor equals the tokenizer's.
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        SCREAMING_SNAKE_CASE_ = '''lower newer'''
        SCREAMING_SNAKE_CASE_ = processor(text=_A )
        SCREAMING_SNAKE_CASE_ = tokenizer(_A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _UpperCamelCase ( self ) -> str:
        # Intended: text+image call yields the expected keys; empty call raises.
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        SCREAMING_SNAKE_CASE_ = '''lower newer'''
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = processor(text=_A , images=_A )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(_A ):
            processor()

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intended: image + visual-prompt call yields the expected keys;
        # empty call raises.
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE_ = processor(images=_A , visual_prompt=_A )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(_A ):
            processor()

    def _UpperCamelCase ( self ) -> Optional[int]:
        # Intended: processor.batch_decode delegates to tokenizer.batch_decode.
        SCREAMING_SNAKE_CASE_ = self.get_image_processor()
        SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_ = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
        SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        SCREAMING_SNAKE_CASE_ = processor.batch_decode(_A )
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_A )
        self.assertListEqual(_A , _A )
| 299
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
def __init__( self , _A , **_A ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
return self.model(_A , **_A )
def _UpperCamelCase ( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
return lmap(str.strip , _A )
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
@property
def _UpperCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( _A , _A ) -> Dict:
BaseTransformer.add_model_specific_args(_A , _A )
add_generic_args(_A , _A )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(__SCREAMING_SNAKE_CASE):
    """Translation fine-tuning variant: records the language pair and scores with BLEU.

    Renamed from the obfuscated ``UpperCamelCase__`` because ``main`` below
    instantiates ``TranslationModule``.
    """

    # NOTE(review): attribute names restored to what the base-class methods read
    # (self.loss_names / self.metric_names); the obfuscated source bound all four
    # to a single shadowed name.
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        """Store the src/tgt language pair for the translation datasets.

        The obfuscated signature declared ``_A`` twice (positional and
        ``**kwargs``), and the body read an undefined ``hparams``.
        """
        super().__init__(hparams, **kwargs)
        # NOTE(review): presumably these feed the dataset via dataset_kwargs as in
        # upstream finetune.py — the obfuscated source discarded the assignments;
        # confirm against the base class.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generations with BLEU instead of ROUGE."""
        return calculate_bleu(preds, target)
def A__ ( __lowerCamelCase, __lowerCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
check_output_dir(__lowerCamelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ = SummarizationModule(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = TranslationModule(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = os.environ.get('''WANDB_PROJECT''', __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=__lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = args.val_metric == '''loss'''
SCREAMING_SNAKE_CASE_ = generic_train(
__lowerCamelCase, __lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, __lowerCamelCase ), early_stopping_callback=__lowerCamelCase, logger=__lowerCamelCase, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=__lowerCamelCase ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    # Build the CLI (Trainer flags + model-specific flags), parse, and train.
    # The obfuscated source bound every step to ``__UpperCAmelCase`` while the
    # subsequent lines read ``parser`` / ``args``; restored here.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
| 299
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.