def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
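
# A sketch (not part of the original solution): the same sum has an O(1)
# closed form via inclusion-exclusion over arithmetic series.
def solution_closed_form(n: int = 1000) -> int:
    """Sum of the multiples of 3 or 5 below n, in constant time."""

    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)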
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create a universe of discourse using linspace().
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using built-in functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition and max-product composition are sketched after this
    # script.

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
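
# The "max-min composition" and "max-product composition" mentioned above are
# left unimplemented in the original script. A minimal NumPy sketch (an
# illustrative addition, not skfuzzy API): given fuzzy relations R on X×Y and
# S on Y×Z as 2-D membership matrices, their compositions are:
def maxmin_composition(R: np.ndarray, S: np.ndarray) -> np.ndarray:
    """T[i, k] = max_j min(R[i, j], S[j, k])."""
    return np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)


def maxproduct_composition(R: np.ndarray, S: np.ndarray) -> np.ndarray:
    """T[i, k] = max_j (R[i, j] * S[j, k])."""
    return np.max(R[:, :, None] * S[None, :, :], axis=1)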
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
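
# For illustration only (an assumption about the pattern, not the actual
# transformers._LazyModule internals): the same lazy-import behaviour can be
# sketched with a module-level __getattr__ from PEP 562, which defers the
# submodule import until an attribute is first accessed:
#
#     import importlib
#
#     _ATTR_TO_SUBMODULE = {"Mask2FormerConfig": "configuration_mask2former"}
#
#     def __getattr__(name):
#         if name in _ATTR_TO_SUBMODULE:
#             submodule = importlib.import_module("." + _ATTR_TO_SUBMODULE[name], __name__)
#             return getattr(submodule, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")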
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaf modules, convolutions and batch norms.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by running a forward pass with `x` as input."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
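
# Example invocation (hypothetical script and output-path names; pushing to
# the hub requires timm weights locally and hub credentials, since
# --push_to_hub defaults to True):
#
#     python convert_resnet_to_pytorch.py --model_name resnet50 \
#         --pytorch_dump_folder_path ./converted-resnet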
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extracting some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings: copied directly from the teacher.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Transformer layers: keep a subset of the teacher layers. The student-side
    # keys follow DistilBERT's module naming (q_lin, k_lin, v_lin, out_lin, ...).
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
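
# Sanity-check sketch (not part of the original script): the extracted state
# dict should load into a freshly initialized DistilBERT student. strict=False
# is used because vocab_transform/vocab_layer_norm weights are only extracted
# when --vocab_transform is passed.
#
#     from transformers import DistilBertConfig, DistilBertForMaskedLM
#
#     student = DistilBertForMaskedLM(DistilBertConfig())
#     missing, unexpected = student.load_state_dict(compressed_sd, strict=False)
#     assert not unexpected, unexpected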
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the neighbours of `parent` that are inside the grid and free."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        """Retrace the path from `node` back to the start node via the parents."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Each search advances towards the frontier of the other one.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
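
# Performance note (a sketch, not part of the original): both searches pop
# from the front of a plain Python list, which is O(n) per pop. Using
# collections.deque makes the queue operations O(1) with minimal changes:
#
#     from collections import deque
#
#     self.node_queue = deque([self.start])    # in BreadthFirstSearch.__init__
#     current_node = self.node_queue.popleft()  # instead of .pop(0)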
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Check with dynamic programming whether some subset of `arr` sums to `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
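
# Usage sketch for the function above:
#
#     >>> is_sum_subset([3, 34, 4, 12, 5, 2], 9)   # 4 + 5 == 9
#     True
#     >>> is_sum_subset([3, 34, 4, 12, 5, 2], 30)
#     False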
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a point in [a, b] where `function` becomes 0, using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is precisely 10^-7 wide
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
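
# Usage sketch: f(x) = x**3 - 2*x - 5 has its single real root near 2.0945515,
# so the call above converges (to within the 1e-7 interval width) to:
#
#     >>> round(bisection(f, 1, 1000), 4)
#     2.0946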
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job; shard order is preserved and the
    jobs are given approximately the same number of shards."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
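
# Worked example (sketch): 10 shards spread over at most 3 jobs gives groups
# of sizes 4, 3 and 3, preserving shard order:
#
#     >>> _distribute_shards(num_shards=10, max_num_jobs=3)
#     [range(0, 4), range(4, 7), range(7, 10)]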
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs dicts."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs."""
    # We must shuffle all the lists, and lists of the same size must use the same
    # shuffling, so that entangled lists (e.g. shards and shard metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
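
# Round-trip sketch: splitting gen_kwargs into per-job groups and merging the
# groups back reproduces the original kwargs (names below are illustrative):
#
#     >>> gen_kwargs = {"files": ["a", "b", "c", "d"], "mode": "train"}
#     >>> groups = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
#     >>> [g["files"] for g in groups]
#     [['a', 'b'], ['c', 'd']]
#     >>> _merge_gen_kwargs(groups) == gen_kwargs
#     True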
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model configuration.
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
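
# Example invocation (hypothetical script and path names):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#         --tapas_config_file ./tapas_wtq/config.json \
#         --pytorch_dump_path ./tapas_wtq_pytorch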
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width of the processed images,
        assuming do_resize is set with a scalar shortest_edge size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_2.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
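
# Usage sketch: stooge sort is a deliberately inefficient teaching algorithm,
# running in O(n^(log 3 / log 1.5)) ≈ O(n^2.71) time.
#
#     >>> stooge_sort([2, 4, 5, 3, 1])
#     [1, 2, 3, 4, 5]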
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
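# Hypothetical shape of one record in biencoder-nq-dev.json, inferred from the
# fields accessed above; real DPR records carry additional keys (answers,
# negative_ctxs, ...), and the text below is invented for illustration.
example_dpr_record = {
    "question": "who sings does he love me with reba",
    "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
}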
| 638
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self) -> None:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
        if not self.test_seq2seq:
return
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
_lowerCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
_lowerCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
                    batch = tokenizer.prepare_seq2seq_batch(
src_texts=lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
src_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , lowerCamelCase_ )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase = [AddedToken('''<special>''' , lstrip=lowerCamelCase_ )]
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ )
_lowerCamelCase = tokenizer_r.encode('''Hey this is a <special> token''' )
_lowerCamelCase = tokenizer_r.encode('''<special>''' , add_special_tokens=lowerCamelCase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
_lowerCamelCase = self.tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ )
_lowerCamelCase = tokenizer_p.encode('''Hey this is a <special> token''' )
_lowerCamelCase = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
'''simple docstring'''
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
    def setUpClass(cls):
"""simple docstring"""
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
return cls
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_60_57 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
# fmt: off
_lowerCamelCase = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
_lowerCamelCase = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
_lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_62_03, 3] )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
_lowerCamelCase = NllbTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_lowerCamelCase = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
_lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
_lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
_lowerCamelCase = targets["""input_ids"""]
_lowerCamelCase = shift_tokens_right(
lowerCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[25_60_47, 70, 73_56, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_60_57,
} , )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
_lowerCamelCase = False
_lowerCamelCase = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
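# A minimal usage sketch of the behaviour these tests pin down (this would
# download the real checkpoint; with the default, non-legacy behaviour the
# language code is prepended, as the last assertion above checks):
#
#     from transformers import NllbTokenizer
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#     )
#     ids = tok("UN Chief says there is no military solution in Syria").input_ids
#     # ids[0] == 256047 (eng_Latn), ids[-1] == 2 (</s>)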
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wrapper around CLIP preprocessing that keeps the image transforms in
    torchvision so gradients can flow back through them.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        # NOTE: parameter names are reconstructed from how the values are used
        # below; treat the exact ordering of the keyword defaults as approximate.
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent and decode it into an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent vector and optimize it towards the prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 638
| 0
|
def to_upper_case(word: str) -> str:
    """
    Convert an ASCII string to upper case without using str.upper().

    >>> to_upper_case("hello world")
    'HELLO WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
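# The 32-offset used above comes from the ASCII layout: ord("a") - ord("A") == 32,
# so subtracting 32 maps each lowercase letter onto its uppercase counterpart.
assert ord("a") - ord("A") == 32
assert to_upper_case("Mixed 123 Case!") == "MIXED 123 CASE!"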
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
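# A stripped-down sketch of the `_LazyModule` pattern used above: the module
# body only declares names in `_import_structure`; the actual submodule import
# is deferred until an attribute is first accessed. Names are illustrative:
#
#     import sys
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     sys.modules[__name__] = _LazyModule(
#         __name__, globals()["__file__"], _import_structure, module_spec=__spec__
#     )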
| 638
| 0
|
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
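# The code above implements the closed recurrence s(n) = s(n-1)**2 - s(n-1) + 1,
# since lower * upper + 1 = (s-1) * s + 1. A quick check of the first terms:
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]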
| 715
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        centroid = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(centroid)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto the top ``dimensions`` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # ``dimensions`` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto the top ``dimensions`` discriminant directions."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
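# A small end-to-end check of the PCA helper above; samples are stored
# column-wise here, so the projection has shape (dimensions, n_samples).
if __name__ == "__main__":
    demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 2.0, 2.0]])
    print(principal_component_analysis(demo_features, 2).shape)  # (2, 4)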
| 638
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
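# Example invocation via python-fire (the file name rouge_cli.py is assumed):
#
#     python rouge_cli.py predictions.txt targets.txt --save_path metrics.json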
| 716
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline that generates a mel spectrogram image with a UNet and converts it
    back to audio, optionally going through a VQ-VAE latent space.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop: recover the noise that generates an image."""
        # Only works with a DDIM scheduler, as this process is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
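# The slerp staticmethod above is spherical linear interpolation:
#   slerp(x0, x1, a) = sin((1 - a)*theta)/sin(theta) * x0 + sin(a*theta)/sin(theta) * x1
# with theta = acos(<x0, x1> / (|x0|*|x1|)). For unit vectors x0 = (1, 0) and
# x1 = (0, 1), theta = pi/2 and slerp(x0, x1, 0.5) = (sqrt(2)/2, sqrt(2)/2),
# which still has norm 1 -- unlike plain linear interpolation, whose midpoint
# (0.5, 0.5) shrinks to norm sqrt(2)/2.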
| 638
| 0
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u * (u - 1) * ... * (u - p + 1) for the Newton forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
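# Worked example for the forward-difference scheme above, done by hand for
# y = x**2 sampled at x = [0, 1, 2, 3] (so y = [0, 1, 4, 9]):
#   first differences  [1, 3, 5], second differences [2, 2], third [0]
# Interpolating at x = 1.5 gives u = 1.5 and
#   P = 0 + 1.5*1 + (1.5*0.5/2!)*2 + 0 = 2.25, which is exactly 1.5**2.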
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
        model_card, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the
    ``_import_structure`` half and in the ``TYPE_CHECKING`` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            objects.extend([obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0])
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the two halves of an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise if the two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
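# Sketch of the init layout this script validates: every key of
# _import_structure must be mirrored by the TYPE_CHECKING imports, including
# the try/except/else guards around optional backends (names illustrative):
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]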
| 718
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the DINO checkpoint's weights into our ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
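# Example invocation (script filename and output path are placeholders; the DINO model
# names come from torch.hub: dino_vits8, dino_vits16, dino_vitb8, dino_vitb16):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./vit-dino-b16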
| 638
| 0
|
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = min(__UpperCAmelCase ) # min() finds the minimum value
_lowerCamelCase = max(__UpperCAmelCase ) # max() finds the maximum value
_lowerCamelCase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_lowerCamelCase = [0] * size
# Populate the pigeonholes.
for x in a:
        assert isinstance(x , int ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
_lowerCamelCase = 0
    for count in range(size ):
while holes[count] > 0:
holes[count] -= 1
_lowerCamelCase = count + min_val
i += 1
def __magic_name__( ) -> str:
'''simple docstring'''
_lowerCamelCase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__UpperCAmelCase )
    print('''Sorted order is:''' , ''' '''.join(map(str , a ) ) )
if __name__ == "__main__":
main()
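# Worked example for the input above: a = [8, 3, 2, 7, 4, 6, 8] gives min_val = 2,
# max_val = 8 and size = 7, so holes = [1, 1, 1, 0, 1, 1, 2] after counting,
# and the write-back emits 2 3 4 6 7 8 8.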
| 719
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
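# Example invocation (script filename and paths are placeholders); drop --not_finetuned
# and pass --dict_path for a fine-tuned phoneme-CTC checkpoint:
#   python convert_unispeech_checkpoint.py --checkpoint_path unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf --not_finetuned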
| 638
| 0
|
from collections.abc import Callable
import numpy as np
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase = int(np.ceil((x_end - xa) / step_size ) )
_lowerCamelCase = np.zeros((n + 1,) )
_lowerCamelCase = ya
_lowerCamelCase = xa
    for k in range(n ):
        # Euler predictor step
        _lowerCamelCase = y[k] + step_size * ode_func(x , y[k] )
        # trapezoidal (Heun) corrector; y_pred is the Euler predictor computed above
        _lowerCamelCase = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
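# Minimal usage sketch for Heun's method above (hypothetical binding `heun`):
# integrating y' = y from x = 0 to 1 with step 0.1 and y(0) = 1 yields
# y[-1] ~ 2.714, close to e, as expected for a second-order scheme.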
| 720
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*A_ , **A_ )
| 638
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
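# Example invocation (script filename, runner names and token are placeholders):
#   python get_runner_status.py --target_runners runner-a,runner-b --token <GITHUB_TOKEN>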
| 638
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
A_ = 'wav2vec2'
def __init__( self , A_=32 , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.1 , A_=0.1 , A_=0.02 , A_=1E-5 , A_="group" , A_="gelu" , A_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A_=(5, 2, 2, 2, 2, 2, 2) , A_=(10, 3, 3, 3, 3, 2, 2) , A_=False , A_=1_28 , A_=16 , A_=False , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_=3_20 , A_=2 , A_=0.1 , A_=1_00 , A_=2_56 , A_=2_56 , A_=0.1 , A_="sum" , A_=False , A_=False , A_=2_56 , A_=(5_12, 5_12, 5_12, 5_12, 15_00) , A_=(5, 3, 3, 1, 1) , A_=(1, 2, 3, 1, 1) , A_=5_12 , A_=0 , A_=1 , A_=2 , A_=False , A_=3 , A_=2 , A_=3 , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
_lowerCamelCase = hidden_size
_lowerCamelCase = feat_extract_norm
_lowerCamelCase = feat_extract_activation
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = conv_bias
_lowerCamelCase = num_conv_pos_embeddings
_lowerCamelCase = num_conv_pos_embedding_groups
_lowerCamelCase = len(self.conv_dim )
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = feat_proj_dropout
_lowerCamelCase = final_dropout
_lowerCamelCase = layerdrop
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = initializer_range
_lowerCamelCase = vocab_size
_lowerCamelCase = do_stable_layer_norm
_lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase = apply_spec_augment
_lowerCamelCase = mask_time_prob
_lowerCamelCase = mask_time_length
_lowerCamelCase = mask_time_min_masks
_lowerCamelCase = mask_feature_prob
_lowerCamelCase = mask_feature_length
_lowerCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCamelCase = num_codevectors_per_group
_lowerCamelCase = num_codevector_groups
_lowerCamelCase = contrastive_logits_temperature
_lowerCamelCase = feat_quantizer_dropout
_lowerCamelCase = num_negatives
_lowerCamelCase = codevector_dim
_lowerCamelCase = proj_codevector_dim
_lowerCamelCase = diversity_loss_weight
# ctc loss
_lowerCamelCase = ctc_loss_reduction
_lowerCamelCase = ctc_zero_infinity
# adapter
_lowerCamelCase = add_adapter
_lowerCamelCase = adapter_kernel_size
_lowerCamelCase = adapter_stride
_lowerCamelCase = num_adapter_layers
_lowerCamelCase = output_hidden_size or hidden_size
_lowerCamelCase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = list(UpperCAmelCase__ )
_lowerCamelCase = xvector_output_dim
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
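# With the default conv strides (5, 2, 2, 2, 2, 2, 2) the property above evaluates to
# 5 * 2**6 = 320: one encoder frame per 320 waveform samples, i.e. ~20 ms at 16 kHz.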
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ = logging.get_logger(__name__)
snake_case__ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
snake_case__ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
snake_case__ = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
_lowerCamelCase = collections.OrderedDict()
with open(_lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = f.readlines()
_lowerCamelCase = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(_lowerCAmelCase ):
_lowerCamelCase = b
_lowerCamelCase = idx
for wd in b:
_lowerCamelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
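# Layout assumed for vocab.txt above: each line is a comma-separated group of surface
# forms that share one token id; a line equal to ',' (or containing no comma) is kept
# whole so the delimiter character itself stays representable.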
class UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|startoftext|>" , A_="<|endoftext|>" , A_=False , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , do_clean_text=_lowerCamelCase , **_lowerCamelCase , )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_lowerCamelCase = do_clean_text
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = load_vocab_and_emoji(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        # self.vocab supports character-variant fluctuation unique to Japanese, so it is larger
        # than raw_vocab; report the size of the raw vocabulary instead
return len(self.raw_vocab )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
return self.subword_tokenizer.tokenize(_lowerCamelCase , clean=self.do_clean_text )
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(_lowerCamelCase , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(_lowerCamelCase )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = ''''''.join(_lowerCamelCase ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = 0
if os.path.isdir(_lowerCamelCase ):
_lowerCamelCase = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
_lowerCamelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
_lowerCamelCase = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(''','''.join(_lowerCamelCase ) + '''\n''' )
index += 1
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _lowerCamelCase )
return vocab_file, emoji_file
class UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = vocab # same as swe
_lowerCamelCase = ids_to_tokens # same as bpe
_lowerCamelCase = emoji
        _lowerCamelCase = np.max([len(w ) for w in self.vocab.keys()] )
_lowerCamelCase = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
_lowerCamelCase = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
_lowerCamelCase = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
_lowerCamelCase = re.compile(
r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_lowerCamelCase = re.compile(
r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_lowerCamelCase = re.compile(
r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
_lowerCamelCase = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
_lowerCamelCase = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
_lowerCamelCase = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.content_repattera.sub('''<URL>''' , _lowerCamelCase )
_lowerCamelCase = self.content_repattera.sub('''<EMAIL>''' , _lowerCamelCase )
_lowerCamelCase = self.content_repattera.sub('''<TEL>''' , _lowerCamelCase )
_lowerCamelCase = self.content_repattera.sub('''<DATE>''' , _lowerCamelCase )
_lowerCamelCase = self.content_repattera.sub('''<DATE>''' , _lowerCamelCase )
_lowerCamelCase = self.content_repattera.sub('''<PRICE>''' , _lowerCamelCase )
_lowerCamelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_lowerCamelCase = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def UpperCamelCase_ ( self , A_ , A_=False ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = text.replace(''' ''' , '''<SP>''' )
_lowerCamelCase = text.replace(''' ''' , '''<SP>''' )
_lowerCamelCase = text.replace('''\r\n''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\n''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\r''' , '''<BR>''' )
_lowerCamelCase = text.replace('''\t''' , '''<TAB>''' )
_lowerCamelCase = text.replace('''—''' , '''ー''' )
_lowerCamelCase = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
_lowerCamelCase = text.replace(_lowerCamelCase , _lowerCamelCase )
if clean:
_lowerCamelCase = self.clean_text(_lowerCamelCase )
def check_simbol(A_ ):
_lowerCamelCase = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 2:
_lowerCamelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2a1 and c <= 0xc_2bf)
or (c >= 0xc_780 and c <= 0xc_783)
or (c >= 0xc_ab9 and c <= 0xc_bbf)
or (c >= 0xc_c80 and c <= 0xc_da2)
):
return True
return False
def checkuae(A_ ):
_lowerCamelCase = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 3:
_lowerCamelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28_080 and c <= 0xe2b_07f:
return True
return False
_lowerCamelCase = 0
_lowerCamelCase = []
while pos < len(_lowerCamelCase ):
_lowerCamelCase = min(len(_lowerCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
_lowerCamelCase = [] # (token_id, token, pos)
for e in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCamelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowerCamelCase ) > 2:
_lowerCamelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowerCamelCase ) > 0:
# the smallest token_id is adopted
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sorted(_lowerCamelCase , key=lambda x : x[0] )[0]
result.append(_lowerCamelCase )
_lowerCamelCase = e
else:
_lowerCamelCase = pos + 1
_lowerCamelCase = text[pos:end]
if check_simbol(_lowerCamelCase ):
result.append('''<KIGOU>''' )
elif checkuae(_lowerCamelCase ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
_lowerCamelCase = end
return result
def UpperCamelCase_ ( self , A_ , A_="\n" ) -> Dict:
"""simple docstring"""
_lowerCamelCase = []
_lowerCamelCase = []
_lowerCamelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
_lowerCamelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_lowerCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode('''utf-8''' , errors='''replace''' ) )
_lowerCamelCase = ''''''.join(_lowerCamelCase )
return text
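# Byte-fallback sketch: a character missing from the vocabulary is encoded above as one
# '<|byte%d|>' token per byte of its UTF-8 encoding, and the decoder buffers those byte
# values and reassembles them with bytearray(...).decode('utf-8', errors='replace').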
| 701
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
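# Worked example for the modular variant: a = 5, b = 13, c = 7 gives
# (5 * 13) % 7 = 65 % 7 = 2, with the accumulator reduced mod c after every addition.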
| 638
| 0
|
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return 10 - x * x
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
    if equation(a ) * equation(b ) >= 0:
raise ValueError('''Wrong space!''' )
_lowerCamelCase = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase = (a + b) / 2
# Check if middle point is root
        if equation(c ) == 0.0:
break
# Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
_lowerCamelCase = c
else:
_lowerCamelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
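# For bisection(-2, 5): equation(-2) = 6 and equation(5) = -15 bracket a sign change,
# so the loop halves [a, b] until it is within 0.01 of the root sqrt(10) ~ 3.162.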
| 702
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
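# Example invocation (script filename and output path are placeholders):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-hf --push_to_hub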
| 638
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
snake_case__ = logging.get_logger(__name__)
@dataclass
class UpperCamelCase ( UpperCAmelCase_ ):
'''simple docstring'''
A_ = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **A_ ) -> Tuple:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase = deprecated_arg[3:]
_lowerCamelCase = not kwargs.pop(_lowercase )
logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
                    F' {positive_arg}={kwargs[positive_arg]}' )
_lowerCamelCase = kwargs.pop('''tpu_name''' , self.tpu_name )
_lowerCamelCase = kwargs.pop('''device_idx''' , self.device_idx )
_lowerCamelCase = kwargs.pop('''eager_mode''' , self.eager_mode )
_lowerCamelCase = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_lowercase )
A_ = field(
default=UpperCAmelCase_ , metadata={'help': 'Name of TPU'} , )
A_ = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
A_ = field(default=UpperCAmelCase_ , metadata={'help': 'Benchmark models in eager model.'} )
A_ = field(
default=UpperCAmelCase_ , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
} , )
@cached_property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
_lowerCamelCase = None
if self.tpu:
try:
if self.tpu_name:
_lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCamelCase = None
return tpu
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCamelCase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
_lowerCamelCase = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
_lowerCamelCase = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.n_gpu > 0
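# Device selection order implemented above: TPU when a cluster resolver can be built,
# otherwise the single GPU at device_idx, otherwise CPU; multi-GPU is not supported.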
| 703
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
# TODO Update this
snake_case__ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase ( snake_case__ ):
'''simple docstring'''
A_ = 'esm'
def __init__( self , A_=None , A_=None , A_=None , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_=0.1 , A_=0.1 , A_=10_26 , A_=0.02 , A_=1E-1_2 , A_="absolute" , A_=True , A_=None , A_=False , A_=False , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
_lowerCamelCase = emb_layer_norm_before
_lowerCamelCase = token_dropout
_lowerCamelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
_lowerCamelCase = EsmFoldConfig()
elif isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = EsmFoldConfig(**lowercase_ )
_lowerCamelCase = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
_lowerCamelCase = get_default_vocab_list()
else:
_lowerCamelCase = vocab_list
else:
_lowerCamelCase = None
_lowerCamelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , lowercase_ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = super().to_dict()
if isinstance(self.esmfold_config , lowercase_ ):
_lowerCamelCase = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = None
A_ = True
A_ = False
A_ = False
A_ = False
A_ = 0
A_ = True
A_ = False
A_ = 128
A_ = None
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
if self.trunk is None:
_lowerCamelCase = TrunkConfig()
elif isinstance(self.trunk , lowercase_ ):
_lowerCamelCase = TrunkConfig(**self.trunk )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = asdict(self )
_lowerCamelCase = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 48
A_ = 1_024
A_ = 128
A_ = 32
A_ = 32
A_ = 32
A_ = 0
A_ = 0
A_ = False
A_ = 4
A_ = 128
A_ = None
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
if self.structure_module is None:
_lowerCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase_ ):
_lowerCamelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
_lowerCamelCase = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(F'`dropout` should be lower than 0.4, got {self.dropout}.' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = asdict(self )
_lowerCamelCase = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 384
A_ = 128
A_ = 16
A_ = 128
A_ = 12
A_ = 4
A_ = 8
A_ = 0.1
A_ = 8
A_ = 1
A_ = 2
A_ = 7
A_ = 10
A_ = 1E-8
A_ = 1E5
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
return asdict(self )
def __magic_name__( ) -> Optional[Any]:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
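# The default list above has 33 entries (4 special tokens, 25 residue/ambiguity codes,
# '.', '-', '<null_1>' and '<mask>'), matching the ESM-2 vocabulary size of 33.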
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {"vocab_file": "sentencepiece.bpe.model"}
snake_case__ = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
snake_case__ = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
snake_case__ = "▁"
class UpperCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
_lowerCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_lowerCamelCase = len(self.sp_model ) - 1
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(A_ )
return spm_id if spm_id else self.unk_token_id
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A_ )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = []
_lowerCamelCase = ''''''
_lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
_lowerCamelCase = True
_lowerCamelCase = []
else:
current_sub_tokens.append(A_ )
_lowerCamelCase = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
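# Special-token layout produced by the tokenizer above (RoBERTa-style pair encoding):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s></s> B </s>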
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
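# Hedged usage sketch, assuming this class corresponds to diffusers' UNet1DModel
# (arguments and shapes are illustrative, not taken from the code above):
# import torch
# from diffusers import UNet1DModel
# net = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# sample = torch.randn(1, 2, 65536)
# out = net(sample, timestep=torch.tensor([10])).sample  # same shape as `sample`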
| 638
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case__ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
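# The structure above defers heavy framework imports until first attribute access.
# Minimal sketch of the idea (hypothetical names, not the real _LazyModule):
# import importlib, types
# class _Lazy(types.ModuleType):
#     def __getattr__(self, attr):
#         submodule = self._attr_to_submodule[attr]  # hypothetical lookup table
#         return getattr(importlib.import_module("." + submodule, self.__name__), attr)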
| 706
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace().
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc.).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using built-in functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A') = 1 - µA(x)
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), 1 - µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, µA(x) - µB(x)]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# Max-min and max-product composition are not demonstrated in this script.
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
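# Optional sanity check one could append (a sketch, not in the original demo):
# De Morgan's law NOT(A OR B) == NOT(A) AND NOT(B) should hold pointwise here.
# lhs = fuzz.fuzzy_not(fuzz.fuzzy_or(X, young, X, middle_aged)[1])
# rhs = fuzz.fuzzy_and(X, fuzz.fuzzy_not(young), X, fuzz.fuzzy_not(middle_aged))[1]
# assert np.allclose(lhs, rhs)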
| 638
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = '''hf-internal-testing/tiny-random-t5'''
_lowerCamelCase = AutoTokenizer.from_pretrained(__UpperCamelCase )
_lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
_lowerCamelCase = tokenizer('''This is me''' , return_tensors='''pt''' )
_lowerCamelCase = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_lowerCamelCase = model.generate(**__UpperCamelCase )
_lowerCamelCase = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
_lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_lowerCamelCase = model_reloaded.generate(**__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase ) )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = '''hf-internal-testing/tiny-random-t5'''
_lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
_lowerCamelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCamelCase ):
model.save_pretrained(__UpperCamelCase )
_lowerCamelCase = model.reverse_bettertransformer()
model.save_pretrained(__UpperCamelCase )
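# Hedged sketch of the round trip these tests exercise (checkpoint name from above):
# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()        # swap in optimum's fused attention
# model = model.reverse_bettertransformer()   # restore canonical weights
# model.save_pretrained("./t5-checkpoint")    # saving now succeeds, as asserted above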
| 707
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(A_.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transferred from={src_m} to={dest_m}' )
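# How the two helpers above cooperate (descriptive note, not new behavior):
# Tracker registers a forward hook on every module, runs one forward pass, and
# keeps the leaf modules in execution order; ModuleTransfer then zips the traced
# source/destination leaves and copies state dicts pairwise, failing loudly when
# the two graphs execute a different number of leaf operations.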
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# reuse the ConvNeXt image processor, since its preprocessing matches
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
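# Example invocation (a sketch; the script filename is illustrative):
#   python convert_resnet.py --model_name resnet50 --pytorch_dump_folder_path ./out
# Caveat: `--push_to_hub` is declared with argparse's type=bool, so any non-empty
# value (including "False") parses as True; pass an empty string to disable it.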
| 638
| 0
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
UpperCamelCase__ = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
UpperCamelCase__ = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
UpperCamelCase__ = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
UpperCamelCase__ = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
UpperCamelCase__ = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
UpperCamelCase__ = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
UpperCamelCase__ = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase = randrange(len(__UpperCAmelCase ) ), randrange(len(__UpperCAmelCase ) )
_lowerCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
_lowerCamelCase , _lowerCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
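# The indexing trick above maps the two comparisons onto the outcome list:
# play < oppo  -> 0 + 0 = 0 -> "Loss"
# play == oppo -> 1 + 0 = 1 -> "Tie"
# play > oppo  -> 1 + 1 = 2 -> "Win"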
def __magic_name__( __UpperCAmelCase = 100 ) -> Union[str, Any]:
'''simple docstring'''
return (generate_random_hand() for _ in range(__UpperCAmelCase ))
@pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = PokerHand(__UpperCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected
def __magic_name__( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = [PokerHand(__UpperCAmelCase ) for hand in SORTED_HANDS]
_lowerCamelCase = poker_hands.copy()
shuffle(__UpperCAmelCase )
_lowerCamelCase = chain(sorted(__UpperCAmelCase ) )
for index, hand in enumerate(__UpperCAmelCase ):
assert hand == poker_hands[index]
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=__UpperCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __magic_name__( ) -> str:
'''simple docstring'''
_lowerCamelCase = PokerHand('''2C 4S AS 3D 5C''' )
_lowerCamelCase = True
_lowerCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __magic_name__( ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = 0
_lowerCamelCase = os.path.abspath(os.path.dirname(__UpperCAmelCase ) )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''poker_hands.txt''' )
with open(__UpperCAmelCase ) as file_hand:
for line in file_hand:
_lowerCamelCase = line[:14].strip()
_lowerCamelCase = line[15:].strip()
_lowerCamelCase , _lowerCamelCase = PokerHand(__UpperCAmelCase ), PokerHand(__UpperCAmelCase )
_lowerCamelCase = player.compare_with(__UpperCAmelCase )
if output == "Win":
answer += 1
assert answer == 376
| 708
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
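# Hedged sketch of converting word offsets to times, as the slow test above does
# (names follow the test; treat this as illustrative, not an API guarantee):
# time_per_frame = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
# word_times = [(d["start_offset"] * time_per_frame, d["end_offset"] * time_per_frame)
#               for d in output.word_offsets]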
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case__ = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
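# Worked example for the subset-sum DP above (illustrative call; the function
# name is obfuscated in this dump):
# arr = [3, 34, 4, 12, 5, 2]
# required_sum = 9  -> True, since {4, 5} (and {3, 4, 2}) sums to 9
# required_sum = 30 -> False, no subset of arr reaches 30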
| 638
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase ( a__ ):
'''simple docstring'''
A_ = ["image_processor", "tokenizer"]
A_ = "BlipImageProcessor"
A_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = False
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
_lowerCamelCase = self.image_processor
def __call__( self , A_ = None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_lowerCamelCase = self.tokenizer
_lowerCamelCase = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
return text_encoding
# add pixel_values
_lowerCamelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ )
if text is not None:
_lowerCamelCase = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
else:
_lowerCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase_ )
return encoding_image_processor
def UpperCamelCase_ ( self , *A_ , **A_ ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ ( self , *A_ , **A_ ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer.model_input_names
_lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
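# Hedged usage sketch (checkpoint name is illustrative, not from this file):
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")
# `inputs` holds pixel_values from the image processor plus the tokenized text.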
| 710
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
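# Worked example for the shard distribution above: 10 shards over at most 3 jobs
# yields group sizes 4, 3, 3, i.e. [range(0, 4), range(4, 7), range(7, 10)].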
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case__ = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : A_[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : A_[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=True )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize two image processors: a default one, and one with resizing,
# rescaling and normalization disabled so that only padding is applied
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
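# Minimal usage sketch (illustrative only, not part of the test suite above):
# a single processor call resizes, normalizes and pads the image and converts
# COCO-style annotations into label tensors. The empty annotation list and the
# file name below are hypothetical placeholders, not real data.
#
#   from PIL import Image
#   from transformers import YolosImageProcessor
#
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   image = Image.open("cats.png")
#   target = {"image_id": 39769, "annotations": []}
#   encoding = processor(images=image, annotations=target, return_tensors="pt")
#   encoding["pixel_values"].shape  # (1, 3, height, width) after resize + pad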
| 638
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCamelCase :
'''simple docstring'''
A_ = MBartConfig
A_ = {}
A_ = 'gelu'
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=20 , A_=2 , A_=1 , A_=0 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = eos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = bos_token_id
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCamelCase = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def UpperCamelCase_ ( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
_lowerCamelCase = inputs_dict['''input_ids''']
_lowerCamelCase = input_ids[:1, :]
_lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
_lowerCamelCase = inputs_dict['''head_mask''']
_lowerCamelCase = 1
# first forward pass
_lowerCamelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_lowerCamelCase , _lowerCamelCase = outputs.to_tuple()
_lowerCamelCase = past_key_values[1]
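# NOTE: the remainder of this check is elided in this snippet. In the usual
# form of this test (an assumption based on the surrounding test suite, not
# shown here), hypothetical next tokens are appended to the inputs, the model
# is run once without and once with past_key_values, and the two outputs are
# asserted to match within a small tolerance.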
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_lowerCamelCase = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A_ = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ = True
A_ = False
A_ = False
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = TFMBartModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ = [
' UN Chief Says There Is No Military Solution in Syria',
]
A_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
A_ = 'facebook/mbart-large-en-ro'
@cached_property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase_ ( self , **A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.translate_src_text(**lowerCAmelCase_ )
self.assertListEqual(self.expected_text , lowerCAmelCase_ )
def UpperCamelCase_ ( self , **A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' )
_lowerCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
_lowerCamelCase = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
return generated_words
@slow
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
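# Stand-alone translation sketch (illustrative; it mirrors the integration test
# above and assumes the same "facebook/mbart-large-en-ro" checkpoint):
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeqaSeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   tokenizer.batch_decode(generated, skip_special_tokens=True)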
| 712
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
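# Input/output sketch (an assumption based on the standard DPR data format,
# not taken from this file): each record in biencoder-nq-dev.json looks roughly like
#
#   {"question": "...", "positive_ctxs": [{"title": "...", ...}, ...]}
#
# and the script writes one question per line to --evaluation_set and the
# tab-joined titles of the positive contexts to --gold_data_path.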
| 638
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase ( PretrainedConfig ):
'''simple docstring'''
A_ = 'mobilenet_v2'
def __init__( self , A_=3 , A_=2_24 , A_=1.0 , A_=8 , A_=8 , A_=6 , A_=32 , A_=True , A_=True , A_="relu6" , A_=True , A_=0.8 , A_=0.02 , A_=0.001 , A_=2_55 , **A_ , ) -> Dict:
"""simple docstring"""
super().__init__(**kwargs )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = depth_multiplier
_lowerCamelCase = depth_divisible_by
_lowerCamelCase = min_depth
_lowerCamelCase = expand_ratio
_lowerCamelCase = output_stride
_lowerCamelCase = first_layer_is_expansion
_lowerCamelCase = finegrained_output
_lowerCamelCase = hidden_act
_lowerCamelCase = tf_padding
_lowerCamelCase = classifier_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = semantic_loss_ignore_index
class UpperCamelCase ( OnnxConfig ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return 1E-4
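# Usage sketch (illustrative; MobileNetV2Config stands in for the configuration
# class defined above, whose name was rewritten to UpperCamelCase):
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   config.output_stride                    # 32 unless overridden
#   MobileNetV2Config(depth_multiplier=0)   # raises ValueError, see __init__ above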
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
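# Accepted prompt formats for the method above (illustrative examples):
#   "a smiling face|wearing sunglasses"      -> two prompts, weight 1.0 each
#   "a smiling face:2.0"                     -> one prompt with weight 2.0
#   [("a smiling face", 2.0), ...]           -> explicit (prompt, weight) pairs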
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
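# End-to-end sketch (illustrative; VQGANCLIPEditor is a hypothetical name for
# the nn.Module defined above, and generate for its last method):
#
#   editor = VQGANCLIPEditor(iterations=25, lr=0.05, log=False)
#   for frame in editor.generate("a smiling face:1.5", image_path="face.png"):
#       pass  # yields one PIL image per optimization step when return_val == "image"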
| 638
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = {}
if train_file is not None:
_lowerCamelCase = [train_file]
if eval_file is not None:
_lowerCamelCase = [eval_file]
if test_file is not None:
_lowerCamelCase = [test_file]
_lowerCamelCase = datasets.load_dataset('''csv''' , data_files=UpperCAmelCase__ )
_lowerCamelCase = list(ds[list(files.keys() )[0]].features.keys() )
_lowerCamelCase = features_name.pop(UpperCAmelCase__ )
_lowerCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowerCamelCase = {label: i for i, label in enumerate(UpperCAmelCase__ )}
_lowerCamelCase = tokenizer.model_input_names
_lowerCamelCase = {}
if len(UpperCAmelCase__ ) == 1:
for k in files.keys():
_lowerCamelCase = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) , batched=UpperCAmelCase__ , )
elif len(UpperCAmelCase__ ) == 2:
for k in files.keys():
_lowerCamelCase = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , ) , batched=UpperCAmelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowerCamelCase = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase = labelaid[ex[label_name]]
yield (d, label)
_lowerCamelCase = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowerCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowerCamelCase = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowerCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowerCamelCase = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowerCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
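# Behaviour note (illustrative summary of the helper above): the CSVs are read
# with datasets.load_dataset, the column selected by label_column_id becomes the
# label, and the remaining one or two columns are tokenized as a single sequence
# or as a sentence pair. The return value is (train_ds, val_ds, test_ds, label2id),
# where any split whose file was not given is None.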
snake_case__ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = field(metadata={'help': 'Which column contains the label'} )
A_ = field(default=None , metadata={'help': 'The path of the training file'} )
A_ = field(default=None , metadata={'help': 'The path of the development file'} )
A_ = field(default=None , metadata={'help': 'The path of the test file'} )
A_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ = field(
default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ = field(
default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ = field(default=False , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
A_ = field(
default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def __magic_name__( ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCAmelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCAmelCase__ ) , labelaid=UpperCAmelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_lowerCamelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase ) -> Dict:
_lowerCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowerCamelCase = TFTrainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_lowerCamelCase = trainer.evaluate()
_lowerCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCAmelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(UpperCAmelCase__ )
return results
if __name__ == "__main__":
main()
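# Example invocation (a sketch; the script name, file names and label column
# index are assumptions, not taken from this file):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-cased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 \
#     --do_train --do_eval \
#     --output_dir ./outputs --overwrite_output_dir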
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
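# Effect of the lazy-module pattern above (illustrative): importing the package
# stays cheap, and a heavy submodule is only loaded on first attribute access,
# e.g. (module path assumed from the import structure above):
#
#   from transformers.models.whisper import WhisperConfig  # loads only configuration_whisper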
| 638
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowerCamelCase = (self.image_size // 32) ** 2
_lowerCamelCase = num_patches + 1
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__UpperCamelCase , )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = ViTHybridModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.type_sequence_label_size
_lowerCamelCase = ViTHybridForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCamelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A_ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = ViTHybridModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(__UpperCamelCase )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_lowerCamelCase = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ViTHybridModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __magic_name__( ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**__UpperCamelCase )
# verify the logits
_lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_lowerCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
_lowerCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' )
_lowerCamelCase = model(**__UpperCamelCase )
_lowerCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_lowerCamelCase = logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
| 715
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take the eigenvector columns in reverse order (largest eigenvalue first)
# and keep only the first `dimensions` of them
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
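# Worked sketch (illustrative): the helpers above treat each COLUMN as a sample
# and each ROW as a feature, so a 3-feature / 3-sample dataset projects to a
# (dimensions, n_samples) array.
#
#   features = np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]])
#   projected = principal_component_analysis(features, dimensions=2)
#   projected.shape  # (2, 3)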
| 638
| 0
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ = AlbertTokenizer
A_ = AlbertTokenizerFast
A_ = True
A_ = True
A_ = True
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase = AlbertTokenizer(snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''this is a test'''
_lowerCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(A_ ) , 3_00_00 )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase = tokenizer.tokenize(A_ )
_lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
_lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(A_ )
_lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = AlbertTokenizer(snake_case__ , keep_accents=True )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A_ , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [48, 25, 21, 12_89] )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(A_ , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = AlbertTokenizer(snake_case__ )
_lowerCamelCase = tokenizer.encode('''sequence builders''' )
_lowerCamelCase = tokenizer.encode('''multi-sequence build''' )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 716
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''
    _optional_components = ["vqvae"]
    def __init__(self, vqvae: AutoencoderKL, unet: UNetaDConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """simple docstring"""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """simple docstring"""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
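# Hedged usage sketch for the `slerp` helper above; the tensor shape, seed, and
# alpha value are illustrative assumptions, not values taken from the pipeline.
def _slerp_demo() -> torch.Tensor:
    generator = torch.Generator().manual_seed(0)
    x0 = torch.randn((1, 8, 8), generator=generator)
    x1 = torch.randn((1, 8, 8), generator=generator)
    # alpha=0 returns x0, alpha=1 returns x1; 0.5 interpolates along the
    # great circle between the two flattened noise tensors
    return AudioDiffusionPipeline.slerp(x0, x1, 0.5)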
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def resolver(self) -> TatoebaConverter:
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
    def test_resolver(self) -> None:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
    def test_model_card(self) -> None:
"""simple docstring"""
        content, mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False) -> List[Any]:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict) -> None:
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img() -> Image.Image:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True) -> None:
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
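# Example invocation sketch (output path is hypothetical; the model names
# follow the torch.hub DINO checkpoints handled above):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16 --base_model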
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 638
| 0
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    '''simple docstring'''
    smp_options = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('''sagemaker_mpi_enabled''' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''' ) is not None
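# Environment sketch this helper inspects (values are hypothetical; SageMaker
# sets these variables on model-parallel jobs):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'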
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    '''simple docstring'''
    mp_parameters: str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
    def __post_init__(self) -> None:
        """simple docstring"""
        super().__post_init__()
        warnings.warn(
            '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
            '''`TrainingArguments` instead.''' , FutureWarning , )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        """simple docstring"""
        logger.info('''PyTorch: setting up devices''' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                '''torch.distributed process group is initialized, but local_rank == -1. '''
                '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
        if self.no_cuda:
            device = torch.device('''cpu''' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('''cuda''' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
            device = torch.device('''cuda''' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
            device = torch.device('''cuda''' , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
@property
    def world_size(self) -> int:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device(self) -> bool:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation(self) -> bool:
"""simple docstring"""
return False
| 719
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned) -> None:
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = '''lm_head'''
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned) -> None:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    '''simple docstring'''
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True) -> None:
    '''simple docstring'''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
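# Example invocation sketch (paths are hypothetical):
#   python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./unispeech-converted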
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
| 0
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    '''simple docstring'''
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    '''simple docstring'''
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext = [int(number ) for number in data.strip().split(''',''' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
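def _demo_try_key() -> None:
    # Hedged self-check sketch (toy data, not the actual Project Euler cipher):
    # XOR a short plaintext with a three-letter key and confirm that try_key
    # recovers it, since XOR with the same key byte is its own inverse.
    key = (ord("a"), ord("b"), ord("c"))
    plaintext = "the cat"
    ciphertext = [ord(char) ^ keychar for char, keychar in zip(plaintext, cycle(key))]
    assert try_key(ciphertext, key) == plaintext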
if __name__ == "__main__":
print(f'''{solution() = }''')
| 720
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
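# Hedged migration sketch: the two calls below should be interchangeable, the
# first simply emitting the FutureWarning above ("Intel/dpt-large" is an
# illustrative checkpoint name, not taken from this file):
#   DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
#   DPTImageProcessor.from_pretrained("Intel/dpt-large")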
| 638
| 0
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ = TypeVar('T')
class LRUCache(Generic[T] ):
    '''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ) -> None:
        """simple docstring"""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ) -> None:
        """simple docstring"""
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ) -> None:
        """simple docstring"""
        for k in self.dq_store:
            print(k )
    def __repr__( self ) -> str:
        """simple docstring"""
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
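# Design note (hedged): `refer` above is O(n) per access because `deque.remove`
# scans linearly; a dict-based variant gives O(1). A minimal sketch using
# OrderedDict move-to-front semantics (names are illustrative):
#   from collections import OrderedDict
#   class FastLRU:
#       def __init__(self, n): self.n, self.d = n, OrderedDict()
#       def refer(self, x):
#           self.d.pop(x, None); self.d[x] = True
#           if len(self.d) > self.n: self.d.popitem(last=False)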
| 721
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token) -> None:
    '''simple docstring'''
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )
    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
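# Shape of the GitHub API response this parser expects (hypothetical excerpt):
#   {"runners": [{"name": "docker-gpu", "status": "offline", ...}, ...]}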
if __name__ == "__main__":
    def list_str(values):
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 638
| 0
|
def or_gate(input_1, input_2):
    '''simple docstring'''
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate():
    '''simple docstring'''
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
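# Hedged extension sketch (not part of the original module): a NOR gate is
# simply the negation of the OR gate above.
def nor_gate(input_1, input_2):
    return int(not or_gate(input_1, input_2))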
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 0
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties(self) -> None:
"""simple docstring"""
_lowerCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(A_ , '''depth_multiplier''' ) )
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , tf_padding=True , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , use_labels=True , is_training=True , num_labels=10 , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self) -> MobileNetVaConfig:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ) -> None:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': MobileNetVaModel,
            '''image-classification''': MobileNetVaForImageClassification,
            '''image-segmentation''': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config(self) -> None:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self) -> None:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self) -> None:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
    def test_attention_outputs(self) -> None:
"""simple docstring"""
pass
    def test_forward_signature(self) -> None:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self) -> None:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained(self) -> None:
        """simple docstring"""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Image.Image:
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self) -> None:
        """simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation(self) -> None:
        """simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 701
|
def multiply(a: int, b: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod(a: int, b: int, c: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
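if __name__ == "__main__":
    # Hedged self-check (not in the original module): the Russian-peasant
    # multiplication above agrees with `*`, and the modular variant with `%`.
    assert multiply(13, 9) == 13 * 9
    assert multiply_mod(13, 9, 7) == (13 * 9) % 7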
| 638
| 0
|
import argparse
import json
from tqdm import tqdm
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
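# Input/output sketch (hypothetical record): a DPR entry such as
#   {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}]}
# yields "who wrote hamlet" in the evaluation set and the tab-joined positive
# context titles ("Hamlet") in the gold data file.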
if __name__ == "__main__":
main()
| 702
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    '''simple docstring'''
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
        model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name):
    '''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False) -> None:
    '''simple docstring'''
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''' )
    image = dataset['''test'''][0]['''image'''].convert('''RGB''' )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''' , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
        '''input_ids'''
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
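# Example invocation sketch (output path is hypothetical):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-converted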
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 638
| 0
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    '''simple docstring'''
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
        model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name):
    '''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # the HuggingFace implementation doesn't use the attn_mask buffer,
            # and the model doesn't apply final LayerNorms to the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
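    # each fine-tuned checkpoint expects its own task-specific start prompt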
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
_lowerCamelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A_ , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = '''sgugger/tiny-distilbert-classification'''
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , only_pretrain_model=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = AutoConfig.from_pretrained(A_ )
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A_ , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ , [config] )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = AutoConfig.from_pretrained(A_ )
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ , [config] )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = AutoConfig.from_pretrained(A_ )
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ , [config] )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''patrickvonplaten/t5-tiny-random'''
_lowerCamelCase = AutoConfig.from_pretrained(A_ )
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ , configs=[config] )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A_ , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A_ , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A_ , save_to_csv=A_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A_ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(A_ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(A_ , '''env.csv''' ) , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ , '''env.csv''' ) ).exists() )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(A_ ):
self.assertTrue(hasattr(A_ , '''sequential''' ) )
self.assertTrue(hasattr(A_ , '''cumulative''' ) )
self.assertTrue(hasattr(A_ , '''current''' ) )
self.assertTrue(hasattr(A_ , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A_ , '''log.txt''' ) , log_print=A_ , trace_memory_line_by_line=A_ , eager_mode=A_ , multi_process=A_ , )
_lowerCamelCase = TensorFlowBenchmark(A_ )
_lowerCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(A_ , '''log.txt''' ) ).exists() )
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
def __magic_name__( __UpperCAmelCase = 1000 ):
'''simple docstring'''
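    # sum every natural number below `limit` that is a multiple of 3 or 5 (Project Euler problem 1)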
return sum(e for e in range(3 , __UpperCAmelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
| 638
| 0
|
'''simple docstring'''
snake_case__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = input('''Enter message: ''' )
_lowerCamelCase = input('''Enter key [alphanumeric]: ''' )
_lowerCamelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
_lowerCamelCase = '''encrypt'''
_lowerCamelCase = encrypt_message(__UpperCAmelCase , __UpperCAmelCase )
elif mode.lower().startswith('''d''' ):
_lowerCamelCase = '''decrypt'''
_lowerCamelCase = decrypt_message(__UpperCAmelCase , __UpperCAmelCase )
print(F'\n{mode.title()}ed message:' )
print(__UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
return translate_message(__UpperCAmelCase , __UpperCAmelCase , '''encrypt''' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
return translate_message(__UpperCAmelCase , __UpperCAmelCase , '''decrypt''' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
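    # Vigenère cipher core: shift each letter by the matching key letter, cycling through the key;
    # non-letters pass through unchanged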
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = key.upper()
for symbol in message:
_lowerCamelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__UpperCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__UpperCAmelCase ):
_lowerCamelCase = 0
else:
translated.append(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 706
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using np.linspace()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc.).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
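    # trimf(x, [a, b, c]) builds a triangular membership function with feet at a and c and a peak at b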
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
snake_case__ = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition and max-product composition are left as placeholders and not implemented here
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
A_ = jnp.floataa
A_ = True
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
super().setup()
_lowerCamelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *A_ , **A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = super().__call__(*A_ , **A_ )
_lowerCamelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = FlaxBigBirdForNaturalQuestionsModule
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
def cross_entropy(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
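        # one-hot encode the labels and take the negative log-likelihood under a log-softmax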
_lowerCamelCase = logits.shape[-1]
_lowerCamelCase = (labels[..., None] == jnp.arange(__UpperCAmelCase )[None]).astype('''f4''' )
_lowerCamelCase = jax.nn.log_softmax(__UpperCAmelCase , axis=-1 )
_lowerCamelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase = reduction(__UpperCAmelCase )
return loss
_lowerCamelCase = partial(__UpperCAmelCase , reduction=jnp.mean )
_lowerCamelCase = cross_entropy(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = cross_entropy(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = cross_entropy(__UpperCAmelCase , __UpperCAmelCase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 'google/bigbird-roberta-base'
A_ = 3_000
A_ = 10_500
A_ = 128
A_ = 3
A_ = 1
A_ = 5
# tx_args
A_ = 3E-5
A_ = 0.0
A_ = 20_000
A_ = 0.00_95
A_ = 'bigbird-roberta-natural-questions'
A_ = 'training-expt'
A_ = 'data/nq-training.jsonl'
A_ = 'data/nq-validation.jsonl'
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=A_ )
_lowerCamelCase = os.path.join(self.base_dir , self.save_dir )
_lowerCamelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 4_096 # no dynamic padding on TPUs
def __call__( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.collate_fn(A_ )
_lowerCamelCase = jax.tree_util.tree_map(A_ , A_ )
return batch
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.fetch_inputs(features['''input_ids'''] )
_lowerCamelCase = {
'''input_ids''': jnp.array(A_ , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(A_ , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = [self._fetch_inputs(A_ ) for ids in input_ids]
return zip(*A_ )
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = [1 for _ in range(len(A_ ) )]
while len(A_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if seed is not None:
_lowerCamelCase = dataset.shuffle(seed=__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) // batch_size ):
_lowerCamelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(__UpperCAmelCase )
@partial(jax.pmap , axis_name='''batch''' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
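    # pmapped train step: compute loss and grads on each device, then average them across the batch axis with pmean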
def loss_fn(__UpperCAmelCase ):
_lowerCamelCase = model_inputs.pop('''start_labels''' )
_lowerCamelCase = model_inputs.pop('''end_labels''' )
_lowerCamelCase = model_inputs.pop('''pooled_labels''' )
_lowerCamelCase = state.apply_fn(**__UpperCAmelCase , params=__UpperCAmelCase , dropout_rng=__UpperCAmelCase , train=__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = outputs
return state.loss_fn(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
_lowerCamelCase , _lowerCamelCase = jax.random.split(__UpperCAmelCase )
_lowerCamelCase = jax.value_and_grad(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = grad_fn(state.params )
_lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
_lowerCamelCase = jax.lax.pmean(__UpperCAmelCase , '''batch''' )
_lowerCamelCase = state.apply_gradients(grads=__UpperCAmelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __magic_name__( __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = model_inputs.pop('''start_labels''' )
_lowerCamelCase = model_inputs.pop('''end_labels''' )
_lowerCamelCase = model_inputs.pop('''pooled_labels''' )
_lowerCamelCase = state.apply_fn(**__UpperCAmelCase , params=state.params , train=__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = outputs
_lowerCamelCase = state.loss_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class UpperCamelCase ( train_state.TrainState ):
'''simple docstring'''
A_ = struct.field(pytree_node=__lowercase )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 42
A_ = 42
A_ = 42
A_ = 42
A_ = None
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_=None ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = model.params
_lowerCamelCase = TrainState.create(
apply_fn=model.__call__ , params=A_ , tx=A_ , loss_fn=A_ , )
if ckpt_dir is not None:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = restore_checkpoint(A_ , A_ )
_lowerCamelCase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
_lowerCamelCase , _lowerCamelCase = build_tx(**A_ )
_lowerCamelCase = train_state.TrainState(
step=A_ , apply_fn=model.__call__ , params=A_ , tx=A_ , opt_state=A_ , )
_lowerCamelCase = args
_lowerCamelCase = data_collator
_lowerCamelCase = lr
_lowerCamelCase = params
_lowerCamelCase = jax_utils.replicate(A_ )
return state
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.args
_lowerCamelCase = len(A_ ) // args.batch_size
_lowerCamelCase = jax.random.PRNGKey(0 )
_lowerCamelCase = jax.random.split(A_ , jax.device_count() )
for epoch in range(args.max_epochs ):
_lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
_lowerCamelCase = get_batched_dataset(A_ , args.batch_size , seed=A_ )
_lowerCamelCase = 0
for batch in tqdm(A_ , total=A_ , desc=F'Running EPOCH-{epoch}' ):
_lowerCamelCase = self.data_collator(A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.train_step_fn(A_ , A_ , **A_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
_lowerCamelCase = jax_utils.unreplicate(state.step )
_lowerCamelCase = running_loss.item() / i
_lowerCamelCase = self.scheduler_fn(state_step - 1 )
_lowerCamelCase = self.evaluate(A_ , A_ )
_lowerCamelCase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(A_ ) )
self.logger.log(A_ , commit=A_ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = get_batched_dataset(A_ , self.args.batch_size )
_lowerCamelCase = len(A_ ) // self.args.batch_size
_lowerCamelCase = jnp.array(0 , dtype=jnp.floataa )
_lowerCamelCase = 0
for batch in tqdm(A_ , total=A_ , desc='''Evaluating ... ''' ):
_lowerCamelCase = self.data_collator(A_ )
_lowerCamelCase = self.val_step_fn(A_ , **A_ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def UpperCamelCase_ ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = jax_utils.unreplicate(A_ )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=''' ... ''' )
self.model_save_fn(A_ , params=state.params )
with open(os.path.join(A_ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(A_ , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(A_ , '''data_collator.joblib''' ) )
with open(os.path.join(A_ , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , A_ )
print('''DONE''' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=''' ... ''' )
with open(os.path.join(__UpperCAmelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f:
_lowerCamelCase = from_bytes(state.params , f.read() )
with open(os.path.join(__UpperCAmelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f:
_lowerCamelCase = from_bytes(state.opt_state , f.read() )
_lowerCamelCase = joblib.load(os.path.join(__UpperCAmelCase , '''args.joblib''' ) )
_lowerCamelCase = joblib.load(os.path.join(__UpperCAmelCase , '''data_collator.joblib''' ) )
with open(os.path.join(__UpperCAmelCase , '''training_state.json''' ) , '''r''' ) as f:
_lowerCamelCase = json.load(__UpperCAmelCase )
_lowerCamelCase = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
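    # piecewise schedule: linear warmup to the peak learning rate, then linear decay towards ~1e-7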
_lowerCamelCase = num_train_steps - warmup_steps
_lowerCamelCase = optax.linear_schedule(init_value=__UpperCAmelCase , end_value=__UpperCAmelCase , transition_steps=__UpperCAmelCase )
_lowerCamelCase = optax.linear_schedule(init_value=__UpperCAmelCase , end_value=1E-7 , transition_steps=__UpperCAmelCase )
_lowerCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
def weight_decay_mask(__UpperCAmelCase ):
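        # exclude biases and LayerNorm scales from weight decay, as is standard for AdamW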
_lowerCamelCase = traverse_util.flatten_dict(__UpperCAmelCase )
_lowerCamelCase = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(__UpperCAmelCase )
_lowerCamelCase = scheduler_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = optax.adamw(learning_rate=__UpperCAmelCase , weight_decay=__UpperCAmelCase , mask=__UpperCAmelCase )
return tx, lr
| 707
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        # check the length of the state_dict keys to see if the module has learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
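        # tracing both models with a dummy input copies the timm weights into the HF model layer by layer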
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
            # we can reuse the ConvNeXt image processor here
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
| 0
|
def __magic_name__( __UpperCAmelCase = 3 , __UpperCAmelCase = 7 , __UpperCAmelCase = 100_0000 ) -> int:
'''simple docstring'''
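    # for every denominator up to `limit`, keep the closest fraction strictly below numerator/denominator
    # (Project Euler problem 71)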
_lowerCamelCase = 0
_lowerCamelCase = 1
for current_denominator in range(1 , limit + 1 ):
_lowerCamelCase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_lowerCamelCase = current_numerator
_lowerCamelCase = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 708
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # reload with overridden decoder parameters and check that they are applied
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
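# Illustrative sketch of the offset-to-seconds conversion used above: one logit
# frame covers `inputs_to_logits_ratio` audio samples, so multiplying a CTC offset
# by ratio / sampling_rate yields seconds. The ratio of 320 is an assumption
# (typical for wav2vec2-base models), not a value read from the test.
def offset_to_seconds(offset: int, inputs_to_logits_ratio: int = 320, sampling_rate: int = 16_000) -> float:
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return offset * time_offset

assert abs(offset_to_seconds(71) - 1.42) < 1e-6  # frame 71 -> 1.42 s under these assumptions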
| 638
| 0
|
class UpperCamelCase :
'''simple docstring'''
def __init__( self ) -> None:
"""simple docstring"""
_lowerCamelCase = {} # Mapping from char to TrieNode
_lowerCamelCase = False
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
for word in words:
self.insert(A_ )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = self
for char in word:
if char not in curr.nodes:
_lowerCamelCase = TrieNode()
_lowerCamelCase = curr.nodes[char]
_lowerCamelCase = True
def UpperCamelCase_ ( self , A_ ) -> bool:
"""simple docstring"""
_lowerCamelCase = self
for char in word:
if char not in curr.nodes:
return False
_lowerCamelCase = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
def _delete(A_ , A_ , A_ ) -> bool:
if index == len(A_ ):
# If word does not exist
if not curr.is_leaf:
return False
_lowerCamelCase = False
return len(curr.nodes ) == 0
_lowerCamelCase = word[index]
_lowerCamelCase = curr.nodes.get(A_ )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_lowerCamelCase = _delete(A_ , A_ , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , A_ , 0 )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> None:
'''simple docstring'''
if node.is_leaf:
print(__UpperCAmelCase , end=''' ''' )
for key, value in node.nodes.items():
print_words(__UpperCAmelCase , word + key )
def __magic_name__( ) -> bool:
'''simple docstring'''
_lowerCamelCase = '''banana bananas bandana band apple all beast'''.split()
_lowerCamelCase = TrieNode()
root.insert_many(__UpperCAmelCase )
# print_words(root, "")
assert all(root.find(__UpperCAmelCase ) for word in words )
assert root.find('''banana''' )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
assert root.find('''apple''' )
assert root.find('''all''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> None:
'''simple docstring'''
print(str(__UpperCAmelCase ) , '''works!''' if passes else '''doesn\'t work :(''' )
def __magic_name__( ) -> None:
'''simple docstring'''
assert test_trie()
def __magic_name__( ) -> None:
'''simple docstring'''
print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
main()
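# A self-contained mini version of the same idea using nested dicts, to make the
# complexity concrete: insert and find each touch one dict level per character,
# i.e. O(len(word)) regardless of how many words are stored. (Illustrative only,
# separate from the class above.)
_END = "$"  # sentinel key marking a complete word

def trie_insert(trie: dict, word: str) -> None:
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[_END] = True

def trie_find(trie: dict, word: str) -> bool:
    node = trie
    for ch in word:
        if ch not in node:
            return False
        node = node[ch]
    return _END in node

_trie: dict = {}
trie_insert(_trie, "band")
trie_insert(_trie, "bandana")
assert trie_find(_trie, "band") and not trie_find(_trie, "ban")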
| 709
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_lowerCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
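# A quick brute-force cross-check of the DP above (illustrative): enumerating all
# subsets of a small input agrees with the table-based answer. Here 4 + 5 = 9, so
# the DP should return True, while no subset of this array can reach 30.
from itertools import combinations

def brute_force_subset_sum(arr: list, required_sum: int) -> bool:
    return any(
        sum(c) == required_sum for r in range(len(arr) + 1) for c in combinations(arr, r)
    )

assert brute_force_subset_sum([3, 34, 4, 12, 5, 2], 9) is True
assert brute_force_subset_sum([3, 34, 4, 12, 5, 2], 30) is False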
| 638
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 710
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
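# A concrete trace of the contiguous split implemented by _distribute_shards above
# (illustrative re-implementation): the first num_shards % max_num_jobs groups get
# one extra shard each, and empty trailing groups are never created.
def split_contiguous(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for g in range(max_num_jobs):
        n = num_shards // max_num_jobs + (g < num_shards % max_num_jobs)
        if n == 0:
            break
        groups.append(range(start, start + n))
        start += n
    return groups

assert split_contiguous(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert split_contiguous(2, 5) == [range(0, 1), range(1, 2)]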
| 638
| 0
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
| 711
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
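# A standalone sketch of the shortest-edge resize rule computed above: the shorter
# side is scaled to `shortest_edge` and the other side keeps the aspect ratio
# (clamping to `longest_edge` is omitted here for brevity). Returns (height, width).
def shortest_edge_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)  # portrait: width becomes 18
assert shortest_edge_resize(200, 400) == (18, 36)  # landscape: height becomes 18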
@require_torch
@require_vision
class UpperCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
| 638
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
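# A minimal sketch of the lazy-import pattern used above (not the actual
# transformers `_LazyModule` implementation): attribute access triggers the real
# submodule import on first use and caches the result on the module object.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule name: [exported attribute names]} for fast lookup
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(F'module {self.__name__} has no attribute {attr}')
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value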
| 712
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
| 638
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=list )
A_ = field(default_factory=list )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=list )
A_ = field(default_factory=list )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
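# A minimal sketch of the forward-hook tracing idea used by `Tracker` above:
# registering a hook on every leaf module records the order in which layers run,
# which is what lets `ModuleTransfer` pair up source and destination layers.
import torch
import torch.nn as nn

def trace_leaf_modules(model, x):
    traced = []
    handles = [
        m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
        for m in model.modules()
        if len(list(m.children())) == 0  # leaf modules only
    ]
    model(x)
    for handle in handles:
        handle.remove()
    return traced

_order = trace_leaf_modules(nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2)), torch.randn(1, 4))
assert [type(m).__name__ for m in _order] == ['Linear', 'ReLU', 'Linear']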
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
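# A self-contained sketch of the prompt format handled by `process_prompts` above
# (illustrative): prompts may be '|'-separated and each may carry a ':weight'
# suffix; unweighted prompts default to 1.0.
def parse_prompts(prompts: str) -> dict:
    processed, weights = [], []
    for prompt in (p.strip() for p in prompts.split('|')):
        if ':' in prompt:
            text, weight = prompt.split(':')
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(prompt)
            weights.append(1.0)
    return {'prompts': processed, 'weights': weights}

assert parse_prompts('a red cat:2.0|a park bench') == {
    'prompts': ['a red cat', 'a park bench'],
    'weights': [2.0, 1.0],
}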
| 638
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( SchedulerCommonTest ):
'''simple docstring'''
A_ = (DDPMParallelScheduler,)
def UpperCamelCase_ ( self , **A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**A_ )
return config
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = len(A_ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
_lowerCamelCase = self.dummy_sample_deter + 0.1
_lowerCamelCase = self.dummy_sample_deter - 0.1
_lowerCamelCase = samplea.shape[0]
_lowerCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCamelCase = torch.arange(A_ )[0:3, None].repeat(1 , A_ )
_lowerCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCamelCase = scheduler.batch_step_no_noise(A_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCamelCase = torch.sum(torch.abs(A_ ) )
_lowerCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = len(A_ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
_lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
_lowerCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(A_ ) )
_lowerCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = len(A_ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
_lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
_lowerCamelCase = model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(A_ ) )
_lowerCamelCase = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
_lowerCamelCase = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
_lowerCamelCase = -1
else:
_lowerCamelCase = timesteps[i + 1]
_lowerCamelCase = scheduler.previous_timestep(A_ )
_lowerCamelCase = prev_t.item()
self.assertEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(A_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A_ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = [1_00, 87, 50, 1, 0]
_lowerCamelCase = len(A_ )
with self.assertRaises(A_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**A_ )
_lowerCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=A_ )
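# A numpy sketch of the 'fixed_small' posterior variance checked above, assuming
# the linear beta schedule from the config (an assumption, not read from the
# library): beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t.
import numpy as np

betas = np.linspace(0.0001, 0.02, 1_000)
alphas_cumprod = np.cumprod(1.0 - betas)

def fixed_small_variance(t: int) -> float:
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return (1 - alpha_bar_prev) / (1 - alphas_cumprod[t]) * betas[t]

assert fixed_small_variance(0) == 0.0  # no posterior noise at t = 0
assert abs(fixed_small_variance(487) - 0.00979) < 1E-4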
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = ort.SessionOptions()
_lowerCamelCase = False
return options
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_lowerCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A red cat sitting on a park bench'''
_lowerCamelCase = np.random.RandomState(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCamelCase = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_lowerCamelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
_lowerCamelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A red cat sitting on a park bench'''
_lowerCamelCase = np.random.RandomState(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , mask_image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCamelCase = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 715
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Center the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not np.nan (i.e. not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
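

# Illustrative usage sketch (not part of the original module): a smoke test of
# the two reducers above on random data. Features are stored column-wise, i.e.
# shape (n_dims, n_samples); the labels and sizes below are arbitrary.
def _demo_dimensionality_reduction() -> None:
    rng = np.random.default_rng(0)
    features = rng.normal(size=(3, 40))  # 3 dimensions, 40 samples
    labels = (np.arange(40) >= 20).astype(int)  # two balanced classes
    assert principal_component_analysis(features, 2).shape == (2, 40)
    assert linear_discriminant_analysis(features, labels, 2, 1).shape == (1, 40)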
| 638
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 3
_lowerCamelCase = (32, 32)
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , )[0]
_lowerCamelCase = image[0, -3:, -3:, -1]
_lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = self.dummy_cond_unet_upscale
_lowerCamelCase = DDPMScheduler()
_lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_lowerCamelCase = self.dummy_vae
_lowerCamelCase = self.dummy_text_encoder
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCamelCase = unet.half()
_lowerCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
_lowerCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , ).images
_lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
_lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase = '''a cat sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type='''np''' , )
_lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
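

# Helper sketch (not part of the test class above) isolating the peak-memory
# measurement pattern used in the last test; `fn` is any callable that runs
# work on the CUDA device.
def _peak_cuda_memory_bytes(fn) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()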
| 716
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCamelCase_ ( xa , xb , alpha ) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
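
    # Endpoint check (illustrative): with theta the angle between the
    # flattened inputs,
    #   alpha = 0 -> sin(theta)*xa/sin(theta) + sin(0)*xb/sin(theta) = xa
    #   alpha = 1 -> sin(0)*xa/sin(theta) + sin(theta)*xb/sin(theta) = xb
    # so the interpolation reproduces its endpoints exactly.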
| 638
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@require_torch
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_lowerCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_lowerCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_lowerCamelCase = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A_ )
BertModel.from_pretrained(A_ )
BertTokenizer.from_pretrained(A_ )
pipeline(task='''fill-mask''' , model=A_ )
# baseline - just load from_pretrained with normal network
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_lowerCamelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase = '''1'''
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_lowerCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_lowerCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_lowerCamelCase = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A_ )
BertModel.from_pretrained(A_ )
BertTokenizer.from_pretrained(A_ )
pipeline(task='''fill-mask''' , model=A_ )
# baseline - just load from_pretrained with normal network
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_lowerCamelCase = self.get_env()
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_lowerCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_lowerCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_lowerCamelCase = self.get_env()
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase = '''1'''
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = '''
from transformers import pipeline
'''
_lowerCamelCase = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_lowerCamelCase = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_lowerCamelCase = self.get_env()
_lowerCamelCase = '''1'''
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = '''
from transformers import AutoModel
'''
_lowerCamelCase = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_lowerCamelCase = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_lowerCamelCase = self.get_env()
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase = '''1'''
_lowerCamelCase = subprocess.run(A_ , env=A_ , check=A_ , capture_output=A_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
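

# Standalone sketch of the monkeypatch the mock snippets above rely on:
# replacing socket.socket makes any network attempt raise immediately, which
# is how these tests simulate being offline inside a subprocess.
def _install_offline_socket() -> None:
    import socket

    def offline_socket(*args, **kwargs):
        raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

    socket.socket = offline_socket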
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
snake_case__ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase = {}
state_dict.pop('''pixel_mean''' , __UpperCAmelCase )
state_dict.pop('''pixel_std''' , __UpperCAmelCase )
_lowerCamelCase = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCamelCase = key.replace(__UpperCAmelCase , __UpperCAmelCase )
if re.match(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = int(re.match(__UpperCAmelCase , __UpperCAmelCase ).group(2 ) )
if layer_nb == 0:
_lowerCamelCase = key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
_lowerCamelCase = key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
_lowerCamelCase = key.replace('''layers.2''' , '''proj_out''' )
_lowerCamelCase = value
_lowerCamelCase = model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
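

# Worked example (hypothetical key, illustrative only): tracing one original
# SAM key through the regex rewrite used in the function above.
def _demo_rename_hypernetwork_key() -> str:
    key = "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"
    pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    match = re.match(pattern, key)
    if match and int(match.group(2)) == 2:
        key = key.replace("layers.2", "proj_out")
    return key  # -> "mask_decoder.output_hypernetworks_mlps.0.proj_out.weight"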
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="ybelkada/segment-anything" ) -> Any:
'''simple docstring'''
_lowerCamelCase = hf_hub_download(__UpperCAmelCase , F'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
_lowerCamelCase = SamConfig()
elif "sam_vit_l" in model_name:
_lowerCamelCase = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_lowerCamelCase = SamConfig(
vision_config=__UpperCAmelCase , )
elif "sam_vit_h" in model_name:
_lowerCamelCase = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_lowerCamelCase = SamConfig(
vision_config=__UpperCAmelCase , )
_lowerCamelCase = torch.load(__UpperCAmelCase , map_location='''cpu''' )
_lowerCamelCase = replace_keys(__UpperCAmelCase )
_lowerCamelCase = SamImageProcessor()
_lowerCamelCase = SamProcessor(image_processor=__UpperCAmelCase )
_lowerCamelCase = SamModel(__UpperCAmelCase )
hf_model.load_state_dict(__UpperCAmelCase )
_lowerCamelCase = hf_model.to('''cuda''' )
_lowerCamelCase = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert('''RGB''' )
_lowerCamelCase = [[[400, 650]]]
_lowerCamelCase = [[1]]
_lowerCamelCase = processor(images=np.array(__UpperCAmelCase ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowerCamelCase = hf_model(**__UpperCAmelCase )
_lowerCamelCase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
_lowerCamelCase = processor(
images=np.array(__UpperCAmelCase ) , input_points=__UpperCAmelCase , input_labels=__UpperCAmelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowerCamelCase = hf_model(**__UpperCAmelCase )
_lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
_lowerCamelCase = ((75, 275, 1725, 850),)
_lowerCamelCase = processor(images=np.array(__UpperCAmelCase ) , input_boxes=__UpperCAmelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowerCamelCase = hf_model(**__UpperCAmelCase )
_lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
_lowerCamelCase = [[[400, 650], [800, 650]]]
_lowerCamelCase = [[1, 1]]
_lowerCamelCase = processor(
images=np.array(__UpperCAmelCase ) , input_points=__UpperCAmelCase , input_labels=__UpperCAmelCase , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
_lowerCamelCase = hf_model(**__UpperCAmelCase )
_lowerCamelCase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
snake_case__ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
snake_case__ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 718
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
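

# Shape sketch (illustrative) of the q/k/v split above: timm stores the three
# projections stacked along dim 0 of one (3*hidden, hidden) matrix, so slicing
# by hidden_size recovers the query, key and value weights in that order.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    in_proj_weight = torch.zeros(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)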
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 638
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
A_ = 'resnet'
A_ = ['basic', 'bottleneck']
def __init__( self , A_=3 , A_=64 , A_=[2_56, 5_12, 10_24, 20_48] , A_=[3, 4, 6, 3] , A_="bottleneck" , A_="relu" , A_=False , A_=None , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(**A_ )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
_lowerCamelCase = num_channels
_lowerCamelCase = embedding_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = layer_type
_lowerCamelCase = hidden_act
_lowerCamelCase = downsample_in_first_stage
_lowerCamelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(A_ ) + 1 )]
_lowerCamelCase , _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-3
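
# Usage note (an assumption: these classes correspond to transformers'
# ResNetConfig and ResNetOnnxConfig upstream): the inputs property above marks
# every axis of pixel_values as dynamic for ONNX export, and the 1e-3 float
# plays the role of the absolute tolerance used when validating the exported
# model's outputs against the PyTorch ones.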
| 719
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
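

# Equivalent sketch (illustrative, not used by the script) of the attribute
# walk at the top of set_recursively: a reduce over getattr resolves a dotted
# key such as "encoder.layers.0.attention" on a model object.
def _resolve_attr(root, dotted_key: str):
    import functools
    return functools.reduce(getattr, dotted_key.split("."), root)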
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
| 0
|
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (0.0 to 1.0)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
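    # Worked example (classic Jaro-Winkler test pair): "martha"/"marhta" has 6
    # matching characters, 1 transposition and a 3-letter common prefix, so
    # jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444 and jw = 0.9444 + 0.1*3*(1-0.9444).
    assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611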
| 720
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
| 0
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_get_aligned_output_features_output_indices(self) -> None:
        """simple docstring"""
        stage_names = ['a', 'b', 'c']
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['c'])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'], None, stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self) -> None:
        """simple docstring"""
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['a', 'b'])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['a'])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'])
        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])

    def test_backbone_mixin(self) -> None:
        """simple docstring"""
        backbone = BackboneMixin()
        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 721
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str(values: str) -> list:
        '''simple docstring'''
        return values.split(',')
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
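    # Equivalent sketch using the requests library instead of shelling out to
    # curl (illustrative; same endpoint and token as above):
    #   import requests
    #   status = requests.get(
    #       "https://api.github.com/repos/huggingface/transformers/actions/runners",
    #       headers={"Accept": "application/vnd.github+json",
    #                "Authorization": f"Bearer {token}"},
    #   ).json()
    #   runners = status["runners"]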
| 638
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
snake_case__ = False
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger '''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A_ )
_lowerCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = generator.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = '''A painting of a squirrel eating a burger '''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
_lowerCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowerCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
snake_case__ = 'true'
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ) -> Union[str, Any]:
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator , use_longest=False ) -> Dict:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
    dataset = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ) -> str:
    '''simple docstring'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ) -> str:
    '''simple docstring'''
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
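# Note (added comment): `gather_for_metrics` gathers tensors from all processes and
# drops the duplicate samples a distributed sampler pads onto the last batch, so the
# concatenated logits/targets line up exactly with the raw dataset size.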
def test_torch_metrics( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ) -> Optional[int]:
    '''simple docstring'''
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'
def test_mrpc( dispatch_batches = False , split_batches = False ) -> Dict:
    '''simple docstring'''
    metric = evaluate.load('''glue''' , '''mrpc''' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['''no''']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['''labels'''] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['''labels''']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main( ) -> Dict:
    '''simple docstring'''
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn( index ) -> str:
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 701
|
def binary_multiply( a , b ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply( a , b , c ) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
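

# Illustrative usage (added sketch, not part of the original module): Russian
# peasant multiplication computes a * b by doubling a and halving b.
if __name__ == "__main__":
    assert binary_multiply(12, 9) == 108
    assert binary_mod_multiply(12, 9, 7) == 108 % 7
    print("binary_multiply(12, 9) =", binary_multiply(12, 9))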
| 638
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ) -> List[Any]:
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
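# e.g. (added example) for a df whose partition 0 holds rows r0, r1 and partition 1 holds r2,
# partition_order=[1, 0] yields [("1_0", r2), ("0_0", r0), ("0_1", r1)].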
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed( ) -> Any:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples( ) -> Optional[int]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable( ) -> Dict:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle( ) -> List[str]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard( ) -> Optional[Any]:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows( ) -> str:
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 702
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model ) -> str:
    '''simple docstring'''
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key( name ) -> Tuple:
    '''simple docstring'''
    if "encoder.model" in name:
        name = name.replace('''encoder.model''' , '''encoder''' )
    if "decoder.model" in name:
        name = name.replace('''decoder.model''' , '''decoder''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if name.startswith('''encoder''' ):
        if "layers" in name:
            name = '''encoder.''' + name
        if "attn.proj" in name:
            name = name.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in name and "mask" not in name:
            name = name.replace('''attn''' , '''attention.self''' )
        if "norm1" in name:
            name = name.replace('''norm1''' , '''layernorm_before''' )
        if "norm2" in name:
            name = name.replace('''norm2''' , '''layernorm_after''' )
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''' , '''output.dense''' )
        if name == "encoder.norm.weight":
            name = '''encoder.layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''encoder.layernorm.bias'''
    return name
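# Worked example (added comment), tracing the replacements above:
# rename_key("encoder.model.layers.0.blocks.0.attn.proj.weight")
#   -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"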
def convert_state_dict( orig_state_dict , model ) -> Dict:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_donut_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> int:
    '''simple docstring'''
    original_model = DonutModel.from_pretrained(model_name ).eval()
    # load HuggingFace model
    encoder_config , decoder_config = get_configs(original_model )
    encoder = DonutSwinModel(encoder_config )
    decoder = MBartForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''' )
    image = dataset['''test'''][0]['''image'''].convert('''RGB''' )
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    processor = DonutProcessor(image_processor , tokenizer )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''' , question )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    prompt_tensors = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
        '''input_ids'''
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
    patch_embeddings , _ = model.encoder.embeddings(pixel_values )
    assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values )
    last_hidden_state = model.encoder(pixel_values ).last_hidden_state
    assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
    # verify decoder hidden states
    original_logits = original_model(pixel_values , prompt_tensors , None ).logits
    logits = model(pixel_values , decoder_input_ids=prompt_tensors ).logits
    assert torch.allclose(original_logits , logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 638
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
snake_case__ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , A_ , A_ , A_ , A_ , A_ , ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=A_ , image_encoder=A_ , image_processor=A_ , scheduler=A_ , renderer=A_ , )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
if latents is None:
_lowerCamelCase = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase = latents.to(A_ )
_lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase_ ( self , A_=0 ) -> List[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
_lowerCamelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(A_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , ) -> Optional[int]:
"""simple docstring"""
if isinstance(A_ , A_ ) and isinstance(image[0] , torch.Tensor ):
_lowerCamelCase = torch.cat(A_ , axis=0 ) if image[0].ndim == 4 else torch.stack(A_ , axis=0 )
if not isinstance(A_ , torch.Tensor ):
_lowerCamelCase = self.image_processor(A_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase = image.to(dtype=self.image_encoder.dtype , device=A_ )
_lowerCamelCase = self.image_encoder(A_ )['''last_hidden_state''']
_lowerCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase = image_embeds.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase = torch.zeros_like(A_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ = 1 , A_ = 25 , A_ = None , A_ = None , A_ = 4.0 , A_ = 64 , A_ = "pil" , A_ = True , ) -> Any:
"""simple docstring"""
if isinstance(A_ , PIL.Image.Image ):
_lowerCamelCase = 1
elif isinstance(A_ , torch.Tensor ):
_lowerCamelCase = image.shape[0]
elif isinstance(A_ , A_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase = len(A_ )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A_ )}' )
_lowerCamelCase = self._execution_device
_lowerCamelCase = batch_size * num_images_per_prompt
_lowerCamelCase = guidance_scale > 1.0
_lowerCamelCase = self._encode_image(A_ , A_ , A_ , A_ )
# prior
self.scheduler.set_timesteps(A_ , device=A_ )
_lowerCamelCase = self.scheduler.timesteps
_lowerCamelCase = self.prior.config.num_embeddings
_lowerCamelCase = self.prior.config.embedding_dim
_lowerCamelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase = latents.reshape(latents.shape[0] , A_ , A_ )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase = self.scheduler.scale_model_input(A_ , A_ )
_lowerCamelCase = self.prior(
A_ , timestep=A_ , proj_embedding=A_ , ).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase = noise_pred.chunk(2 )
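                # (added comment) classifier-free guidance combines the two passes:
                # pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)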
_lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase = self.scheduler.step(
A_ , timestep=A_ , sample=A_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=A_ )
_lowerCamelCase = []
for i, latent in enumerate(A_ ):
_lowerCamelCase = self.renderer.decode(
latent[None, :] , A_ , size=A_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(A_ )
_lowerCamelCase = torch.stack(A_ )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase = [self.numpy_to_pil(A_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=A_ )
| 703
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 0
|
import math
def prime_sieve( n ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
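# Illustrative check (added comment): prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]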
def solution( limit = 9999_6666_3333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
snake_case__ = get_logger(__name__)
class UpperCamelCase :
'''simple docstring'''
A_ = 'dummy_data'
A_ = 'datasets'
A_ = False
def __init__( self , A_ , A_ , A_ , A_ = None , A_ = False , A_ = True , A_ = None , ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 0
_lowerCamelCase = dataset_name
_lowerCamelCase = cache_dir
_lowerCamelCase = use_local_dummy_data
_lowerCamelCase = config
# download_callbacks take a single url as input
_lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase = str(A_ )
# to be downloaded
_lowerCamelCase = None
_lowerCamelCase = None
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
_lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase = cached_path(
A_ , cache_dir=self.cache_dir , extract_compressed_file=A_ , force_extract=A_ )
return os.path.join(A_ , self.dummy_file_name )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
if self._bucket_url is None:
_lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A_ , A_ ):
return self.create_dummy_data_dict(A_ , A_ )
elif isinstance(A_ , (list, tuple) ):
return self.create_dummy_data_list(A_ , A_ )
else:
return self.create_dummy_data_single(A_ , A_ )
def UpperCamelCase_ ( self , A_ , *A_ ) -> List[Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(A_ )
def UpperCamelCase_ ( self , A_ , *A_ , **A_ ) -> Union[str, Any]:
"""simple docstring"""
return path
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {}
def UpperCamelCase_ ( self , A_ , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A_ , A_ ):
for single_url in single_urls:
download_callback(A_ )
else:
_lowerCamelCase = single_urls
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A_ , A_ ):
_lowerCamelCase = [os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) ) for x in single_urls]
else:
_lowerCamelCase = single_urls
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(Path(A_ ).name ) )
_lowerCamelCase = value
# make sure that values are unique
if all(isinstance(A_ , A_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
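    # (added illustrative note) create_dummy_data_dict maps e.g.
    # {"train": "https://host/data/train.csv"} to
    # {"train": os.path.join(<path to dummy data>, "train.csv")}, pointing at local dummy files.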
def UpperCamelCase_ ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A_ ) ) for url in data_url )
_lowerCamelCase = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase = [data_url[0]] * len(A_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(A_ )
return dummy_data_list
def UpperCamelCase_ ( self , A_ , A_ ) -> Any:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(A_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase = os.path.join(A_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(A_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCamelCase_ ( self , A_ ) -> List[Any]:
"""simple docstring"""
def _iter_archive_members(A_ ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase = Path(self.dummy_file ).parent
_lowerCamelCase = path.relative_to(A_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(A_ )
_lowerCamelCase = Path(A_ )
_lowerCamelCase = _iter_archive_members(A_ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(A_ ).as_posix(), file_path.open('''rb''' )
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ):
_lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A_ ):
if os.path.basename(A_ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(A_ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(A_ , A_ )
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
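# Minimal usage sketch (added; assumes the diffusers UNet1DModel API this class mirrors):
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   out = model(torch.randn(1, 2, 65536), timestep=10).sample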
| 638
| 0
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 706
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
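    # Quick numeric sanity check (added example): with µA = 0.4 and µB = 0.5,
    # the algebraic sum is 0.4 + 0.5 - 0.4 * 0.5 = 0.7.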
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 0
|
class RadixNode :
'''simple docstring'''
def __init__( self , A_ = "" , A_ = False ) -> None:
"""simple docstring"""
_lowerCamelCase = {}
# A node will be a leaf if the tree contains its word
_lowerCamelCase = is_leaf
_lowerCamelCase = prefix
def UpperCamelCase_ ( self , A_ ) -> tuple[str, str, str]:
"""simple docstring"""
_lowerCamelCase = 0
for q, w in zip(self.prefix , A_ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
for word in words:
self.insert(A_ )
def UpperCamelCase_ ( self , A_ ) -> None:
"""simple docstring"""
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
_lowerCamelCase = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_lowerCamelCase = RadixNode(prefix=A_ , is_leaf=A_ )
else:
_lowerCamelCase = self.nodes[word[0]]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = incoming_node.match(
A_ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(A_ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_lowerCamelCase = remaining_prefix
_lowerCamelCase = self.nodes[matching_string[0]]
_lowerCamelCase = RadixNode(A_ , A_ )
_lowerCamelCase = aux_node
if remaining_word == "":
_lowerCamelCase = True
else:
self.nodes[matching_string[0]].insert(A_ )
def UpperCamelCase_ ( self , A_ ) -> bool:
"""simple docstring"""
_lowerCamelCase = self.nodes.get(word[0] , A_ )
if not incoming_node:
return False
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = incoming_node.match(
A_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(A_ )
def UpperCamelCase_ ( self , A_ ) -> bool:
"""simple docstring"""
_lowerCamelCase = self.nodes.get(word[0] , A_ )
if not incoming_node:
return False
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = incoming_node.match(
A_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(A_ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_lowerCamelCase = list(self.nodes.values() )[0]
_lowerCamelCase = merging_node.is_leaf
self.prefix += merging_node.prefix
_lowerCamelCase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_lowerCamelCase = False
# If there is 1 edge, we merge it with its child
else:
_lowerCamelCase = list(incoming_node.nodes.values() )[0]
_lowerCamelCase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_lowerCamelCase = merging_node.nodes
return True
def UpperCamelCase_ ( self , A_ = 0 ) -> None:
"""simple docstring"""
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie( ) -> bool:
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def pytests( ) -> None:
    '''simple docstring'''
    assert test_trie()
def main( ) -> None:
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
    main()
| 707
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ) -> Optional[int]:
    '''simple docstring'''
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ) -> Optional[int]:
    '''simple docstring'''
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
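# For reference, a sketch of what the "resnet50" entry above expands to (a
# hypothetical expansion, given num_labels=1000 and the label maps loaded
# earlier):
#   ImageNetPreTrainedConfig == partial(ResNetConfig, num_labels=1000, id2label=..., label2id=...)
#   names_to_config["resnet50"] -> ResNetConfig(depths=[3, 4, 6, 3],
#       hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck", num_labels=1000, ...)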
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=A_ , )
assert hasattr(self , '''env''' )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
_lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
_lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
_lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=A_ , instance_type=self.instance_type , debugger_hook_config=A_ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=A_ , py_version='''py36''' , )
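    # Note on the distribution dict above (a reading, not asserted by the
    # test): `processes_per_host: 8` launches one process per GPU on a
    # p3dn.24xlarge, SMP's `partitions: 4` splits the model across four of
    # them, `ddp: True` data-parallelises across the remaining replicas, and
    # `microbatches: 4` pipelines each batch through the partitions.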
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
TrainingJobAnalytics(A_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.create_estimator(A_ )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , A_ )
| 708
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the processor back with overridden decoder hyperparameters (alpha, beta, ...)
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
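    # Why the pool is created *after* the processor (see the note above): with
    # the POSIX "fork" start method, worker processes inherit a copy of the
    # parent's memory at fork time, so the decoder's language model is only
    # visible in the sub-processes if it already existed when the pool was
    # created.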
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
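    # Offset-to-time note for the conversion above: Wav2Vec2 emits one logit
    # frame per `inputs_to_logits_ratio` input samples (320 for the base
    # architecture), so at a 16 kHz sampling rate each offset unit corresponds
    # to 320 / 16000 = 0.02 s, and start_time = start_offset * 0.02.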
| 638
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '▁'
snake_case__ = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
snake_case__ = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
snake_case__ = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
snake_case__ = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
snake_case__ = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['input_ids']
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = RESOURCE_FILES_NAMES
def __init__( self , A_ , A_=None , A_=False , A_="utf8" , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , vocab_file=A_ , encoding=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
_lowerCamelCase = do_lower_case
_lowerCamelCase = sentencepiece_model_ckpt
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
        # mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
_lowerCamelCase = self.load_vocab(filepath=A_ )
else:
            _lowerCamelCase = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
_lowerCamelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
if text is None:
return None
_lowerCamelCase = self.tokenize(A_ )
_lowerCamelCase , _lowerCamelCase = '''''', []
for i, ch in enumerate(A_ ):
if ch in self.SP_CHAR_MAPPING:
_lowerCamelCase = self.SP_CHAR_MAPPING.get(A_ )
else:
_lowerCamelCase = unicodedata.normalize('''NFKC''' , A_ )
if self.is_whitespace(A_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(A_ ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = normalized_text, [], 0
if self.do_lower_case:
_lowerCamelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
_lowerCamelCase = token[1:]
_lowerCamelCase = text[offset:].index(A_ ) + offset
_lowerCamelCase = start + len(A_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_lowerCamelCase = end
return token_mapping
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(A_ , A_ ) for c in text) )
def UpperCamelCase_ ( self , A_ , A_=False , A_=64 , A_=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
_lowerCamelCase = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
_lowerCamelCase = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
_lowerCamelCase = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
_lowerCamelCase = self.sp_model.EncodeAsPieces(A_ )
else:
_lowerCamelCase = self.sp_model.SampleEncodeAsPieces(A_ , A_ , A_ )
_lowerCamelCase = []
for pi, piece in enumerate(A_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(A_ ) and pi != 0:
new_pieces.append(A_ )
continue
else:
continue
_lowerCamelCase = 0
for i, chunk in enumerate(A_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(A_ ) or self.is_punct(A_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(A_ )
_lowerCamelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_lowerCamelCase = i
if len(A_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.convert_ids_to_tokens(A_ )
_lowerCamelCase = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(A_ , self.unk_token )
def UpperCamelCase_ ( self , A_ , A_=None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase_ ( self , A_ , A_=None ) -> List[Any]:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase_ ( self , A_ , A_=None , A_=False ) -> List[str]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(A_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(A_ ) + 1) + [1] * (len(A_ ) + 3)
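    # Worked example for the method above (hypothetical lengths): with a
    # 2-token first sequence and a 1-token second sequence, the paired input is
    # [CLS] a1 a2 [SEP] [SEP] b1 [SEP] and the returned type ids are
    # [0] * 3 + [1] * 4 == [0, 0, 0, 1, 1, 1, 1].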
def UpperCamelCase_ ( self , A_ ) -> Dict:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(A_ ) == 1:
_lowerCamelCase = unicodedata.category(A_ )
if cat == "Zs":
return True
return False
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = {}
with io.open(A_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(A_ ):
_lowerCamelCase = line.rstrip('''\n''' )
_lowerCamelCase = int(A_ )
return token_to_idx
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
_lowerCamelCase = 0
if os.path.isdir(A_ ):
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_lowerCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(A_ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
_lowerCamelCase = token_index
writer.write(token + '''\n''' )
index += 1
_lowerCamelCase = os.path.join(A_ , '''sentencepiece.bpe.model''' )
with open(A_ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (vocab_file,)
| 709
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no elements,
    # hence True/1
for i in range(arr_len + 1 ):
_lowerCamelCase = True
    # a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
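# Minimal usage sketch (illustrative inputs): the DP table answers whether
# some subset of the array sums exactly to the target.
#   __magic_name__([3, 34, 4, 12, 5, 2], 9)  -> True   (4 + 5 == 9)
#   __magic_name__([3, 34, 4, 12, 5, 2], 30) -> False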
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
| 0
|
import socket
def __magic_name__( ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_lowerCamelCase = socket.gethostname()
_lowerCamelCase = 1_2312
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
_lowerCamelCase = sock.recv(1024 )
if not data:
break
            out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
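# A matching server-side sketch (an assumption: the client above implies a
# peer that accepts the greeting and streams a file back; the function name
# and file path are illustrative).
def _serve_file(filename='''file_to_send''', port=1_2312):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((socket.gethostname(), port))
        server.listen(1)
        conn, _addr = server.accept()
        with conn, open(filename, '''rb''') as in_file:
            conn.recv(1024)  # consume the client's "Hello server!" greeting
            while True:
                chunk = in_file.read(1024)
                if not chunk:
                    break
                conn.send(chunk)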
| 710
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
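# Worked example for the function above: distributing 5 shards over at most 2
# jobs yields [range(0, 3), range(3, 5)], and 2 shards over 3 jobs yields
# [range(0, 1), range(1, 2)] (the third group would be empty, so it is dropped).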
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
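# Worked example for the split above (illustrative gen_kwargs): splitting
# {'''files''': ['''a''', '''b''', '''c''', '''d''']} over max_num_jobs=2 yields
# [{'''files''': ['''a''', '''b''']}, {'''files''': ['''c''', '''d''']}];
# tuples and scalars are copied into every group unchanged.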
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
| 638
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
snake_case__ = True
except (ImportError, AttributeError):
snake_case__ = object
def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
pass
snake_case__ = False
snake_case__ = logging.get_logger('transformers-cli/serving')
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__UpperCAmelCase , args.host , args.port , args.workers )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=A_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=A_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=A_ , default=88_88 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=A_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=A_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=A_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=A_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=A_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=A_ )
def __init__( self , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = pipeline
_lowerCamelCase = host
_lowerCamelCase = port
_lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F'Serving model over {host}:{port}' )
_lowerCamelCase = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=A_ , response_class=A_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=A_ , response_class=A_ , methods=['''POST'''] , ),
] , timeout=6_00 , )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
run(self._app , host=self.host , port=self.port , workers=self.workers )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def UpperCamelCase_ ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) ) -> str:
"""simple docstring"""
try:
_lowerCamelCase = self._pipeline.tokenizer.tokenize(A_ )
if return_ids:
_lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(A_ )
return ServeTokenizeResult(tokens=A_ , tokens_ids=A_ )
else:
return ServeTokenizeResult(tokens=A_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(A_ )} )
def UpperCamelCase_ ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , ) -> int:
"""simple docstring"""
try:
_lowerCamelCase = self._pipeline.tokenizer.decode(A_ , A_ , A_ )
return ServeDeTokenizeResult(model='''''' , text=A_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(A_ )} )
async def UpperCamelCase_ ( self , A_=Body(A_ , embed=A_ ) ) -> Any:
"""simple docstring"""
# Check we don't have empty string
if len(A_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
_lowerCamelCase = self._pipeline(A_ )
return ServeForwardResult(output=A_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(A_ )} )
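# A minimal, self-contained sketch (hypothetical names throughout) of the
# APIRoute pattern used above: the app's routes are declared up front instead
# of via decorators. Kept as a comment so it is not executed when FastAPI is
# unavailable.
#
#     from fastapi import FastAPI
#     from fastapi.routing import APIRoute
#     from pydantic import BaseModel
#
#     class EchoResult(BaseModel):
#         text: str
#
#     async def echo(text: str = "") -> EchoResult:
#         return EchoResult(text=text)
#
#     app = FastAPI(routes=[APIRoute("/echo", echo, response_model=EchoResult, methods=["GET"])])
#     # run with: uvicorn this_module:app --port 8888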
| 711
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
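    # Worked example of the shortest-edge rule above (illustrative numbers):
    # with size={"shortest_edge": 18} and a 400x30 (h x w) image, w < h, so
    # expected_width = 18 and expected_height = int(18 * 400 / 30) = 240.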
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        # Initialize image_processors
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
| 638
| 0
|
from __future__ import annotations
snake_case__ = 8.988E9 # units = N * m^2 * C^-2
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, float]:
'''simple docstring'''
_lowerCamelCase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
_lowerCamelCase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_lowerCamelCase = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_lowerCamelCase = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_lowerCamelCase = (COULOMBS_CONSTANT * charge_product / abs(__UpperCAmelCase )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
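# Worked example (illustrative call; the positional order follows the
# signature above: force, charge_1, charge_2, distance): solving for the
# force between two 1 C charges 1 m apart,
#   __magic_name__(0, 1, 1, 1) -> {'''force''': 8.988e9}
# i.e. Coulomb's constant itself, since F = k * |q1 * q2| / d**2.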
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
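# Sketch of the record shape consumed above (an assumption inferred from the
# keys that are read): each entry of biencoder-nq-dev.json looks like
#   {"question": "...", "positive_ctxs": [{"title": "...", ...}, ...]}
# and yields one question per line in the evaluation set plus one
# tab-separated line of positive-context titles in the gold file.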
| 638
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
@flax_register_to_config
class UpperCamelCase ( nn.Module , __lowercase , __lowercase ):
'''simple docstring'''
A_ = 32
A_ = 4
A_ = 4
A_ = (
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'DownBlock2D',
)
A_ = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D')
A_ = False
A_ = (320, 640, 1_280, 1_280)
A_ = 2
A_ = 8
A_ = None
A_ = 1_280
A_ = 0.0
A_ = False
A_ = jnp.floataa
A_ = True
A_ = 0
A_ = False
def UpperCamelCase_ ( self , A_ ) -> FrozenDict:
"""simple docstring"""
_lowerCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCamelCase = jnp.zeros(A_ , dtype=jnp.floataa )
_lowerCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
_lowerCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_lowerCamelCase , _lowerCamelCase = jax.random.split(A_ )
_lowerCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A_ , A_ , A_ , A_ )["params"]
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.block_out_channels
_lowerCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_lowerCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_lowerCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_lowerCamelCase = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
_lowerCamelCase = self.only_cross_attention
if isinstance(A_ , A_ ):
_lowerCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
_lowerCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_lowerCamelCase = []
_lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
_lowerCamelCase = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
_lowerCamelCase = down_blocks
# mid
_lowerCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_lowerCamelCase = []
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = reversed_block_out_channels[i]
_lowerCamelCase = reversed_block_out_channels[min(i + 1 , len(A_ ) - 1 )]
_lowerCamelCase = i == len(A_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowerCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A_ )
_lowerCamelCase = output_channel
_lowerCamelCase = up_blocks
# out
_lowerCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_lowerCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_=None , A_=None , A_ = True , A_ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
# 1. time
if not isinstance(A_ , jnp.ndarray ):
_lowerCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps.astype(dtype=jnp.floataa )
_lowerCamelCase = jnp.expand_dims(A_ , 0 )
_lowerCamelCase = self.time_proj(A_ )
_lowerCamelCase = self.time_embedding(A_ )
# 2. pre-process
_lowerCamelCase = jnp.transpose(A_ , (0, 2, 3, 1) )
_lowerCamelCase = self.conv_in(A_ )
# 3. down
_lowerCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , A_ , deterministic=not train )
else:
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowerCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
A_ , A_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowerCamelCase = new_down_block_res_samples
# 4. mid
_lowerCamelCase = self.mid_block(A_ , A_ , A_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowerCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_lowerCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(A_ , A_ ):
_lowerCamelCase = up_block(
A_ , temb=A_ , encoder_hidden_states=A_ , res_hidden_states_tuple=A_ , deterministic=not train , )
else:
_lowerCamelCase = up_block(A_ , temb=A_ , res_hidden_states_tuple=A_ , deterministic=not train )
# 6. post-process
_lowerCamelCase = self.conv_norm_out(A_ )
_lowerCamelCase = nn.silu(A_ )
_lowerCamelCase = self.conv_out(A_ )
_lowerCamelCase = jnp.transpose(A_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A_ )
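# A minimal sketch (added for illustration, independent of the class above) of the
# skip-connection bookkeeping in __call__: down blocks append residuals to one flat
# tuple, and each up block peels off the most recent `layers_per_block + 1` entries.
layers_per_block = 2
down_block_res_samples = tuple(range(12 ) )  # stand-in residuals, newest last
res_hidden_states_tuple = down_block_res_samples[-(layers_per_block + 1) :]
down_block_res_samples = down_block_res_samples[: -(layers_per_block + 1)]
assert res_hidden_states_tuple == (9, 10, 11)
assert down_block_res_samples == tuple(range(9 ) )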
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
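# A standalone sketch (illustrative only) of the prompt convention accepted by
# process_prompts above: '|' separates prompts, ':' attaches an optional weight,
# and the weight defaults to 1.0.
def parse_prompt_string( prompts ):
    parsed = []
    for prompt in (p.strip() for p in prompts.split('''|''' )):
        if ":" in prompt:
            text , weight = prompt.split(''':''' )
            parsed.append((text.strip(), float(weight )) )
        else:
            parsed.append((prompt, 1.0) )
    return parsed

assert parse_prompt_string('''a portrait:2 | smiling''' ) == [('''a portrait''', 2.0), ('''smiling''', 1.0)]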
| 638
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(A_ ) )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
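# A minimal sketch (illustrative only, not the actual transformers._LazyModule) of the
# lazy-import pattern used above: attribute access resolves the submodule on first use,
# so importing the package stays cheap until a symbol is actually needed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType ):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        self._import_structure = import_structure
    def __getattr__(self , attr ):
        for module_name , symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(module_name ) , attr )
        raise AttributeError(attr )

demo = _LazyModuleSketch('''demo''' , {'''math''': ['''sqrt''']} )
assert demo.sqrt(9 ) == 3.0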
| 638
| 0
|
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ) -> int:
    '''simple docstring'''
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
snake_case__ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
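    # Worked trace (an added sketch): RULE 1 pushes the single digits 5, 4, 2, 2, 3 and
    # RULE 2 pushes +, *, *; each ')' triggers RULE 4, reducing 4*2=8, then 2+3=5,
    # then 8*5=40, and finally 5+40=45, which RULE 5 returns. Note the per-character
    # scan only supports single-digit operands.
    assert dijkstras_two_stack_algorithm(equation) == 45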
| 715
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape( input_array ) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes( features , labels , classes ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes( features , labels , classes ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis( features , dimensions ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info('''Principal Component Analysis computed''' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
        logging.error('''Dataset empty''' )
        raise AssertionError
def linear_discriminant_analysis( features , labels , classes , dimensions ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('''Linear Discriminant Analysis computed''' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
        logging.error('''Dataset empty''' )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                '''Did not raise AssertionError for dimensions > classes''' )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
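    # Added demonstration (a sketch, not part of the original tests): project a toy
    # 2-D dataset onto its first principal component; the output keeps one row
    # (the chosen dimension) and one column per sample.
    demo_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]] )
    demo_projection = principal_component_analysis(demo_features , 1 )
    assert demo_projection.shape == (1, 4)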
| 638
| 0
|
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
snake_case__ = (0.0, 0.0, 5.0, 9.3, 7.0)
snake_case__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
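    # Added check (a sketch): Horner's rule folds result = result * x + coeff from the
    # highest-degree coefficient down, so it matches the direct power sum (79800.0 here)
    # while using only len(poly) multiplications instead of one power per term.
    assert abs(evaluate_poly(poly , x ) - horner(poly , x ) ) < 1E-6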
| 716
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , A_ ) else 10_00
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , A_ ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , A_ ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(A_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , A_ )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCamelCase_ ( xa , xb , alpha ) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
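# A self-contained check (added sketch) mirroring the spherical interpolation in the
# static method above: for orthogonal unit-norm inputs, slerp keeps unit norm at every
# alpha, which is why it is preferred over a plain linear mix of noise tensors.
def _slerp_sketch( xa , xb , alpha ):
    theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
    return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )

_mid = _slerp_sketch(torch.tensor([1.0, 0.0] ) , torch.tensor([0.0, 1.0] ) , 0.5 )
assert abs(torch.norm(_mid ).item() - 1.0 ) < 1E-4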
| 638
| 0
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = DDIMPipeline
A_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
A_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
A_ = False
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
_lowerCamelCase = DDIMScheduler()
_lowerCamelCase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase_ ( self , A_ , A_=0 ) -> int:
"""simple docstring"""
if str(A_ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(A_ )
else:
_lowerCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_lowerCamelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = '''cpu'''
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCamelCase = self.get_dummy_inputs(A_ )
_lowerCamelCase = pipe(**A_ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_lowerCamelCase = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4] )
_lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A_ , 1E-3 )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = '''google/ddpm-cifar10-32'''
_lowerCamelCase = UNetaDModel.from_pretrained(A_ )
_lowerCamelCase = DDIMScheduler()
_lowerCamelCase = DDIMPipeline(unet=A_ , scheduler=A_ )
ddim.to(A_ )
ddim.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = ddim(generator=A_ , eta=0.0 , output_type='''numpy''' ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = '''google/ddpm-ema-bedroom-256'''
_lowerCamelCase = UNetaDModel.from_pretrained(A_ )
_lowerCamelCase = DDIMScheduler.from_pretrained(A_ )
_lowerCamelCase = DDIMPipeline(unet=A_ , scheduler=A_ )
ddpm.to(A_ )
ddpm.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = ddpm(generator=A_ , output_type='''numpy''' ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_lowerCamelCase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 717
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
        content , mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 638
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
snake_case__ = logging.getLogger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 'sequence-classification'
def __init__( self , A_ ) -> str:
"""simple docstring"""
if type(A_ ) == dict:
_lowerCamelCase = Namespace(**A_ )
_lowerCamelCase = glue_output_modes[hparams.task]
_lowerCamelCase = glue_tasks_num_labels[hparams.task]
super().__init__(A_ , A_ , self.mode )
def UpperCamelCase_ ( self , **A_ ) -> Optional[int]:
"""simple docstring"""
return self.model(**A_ )
def UpperCamelCase_ ( self , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCamelCase = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_lowerCamelCase = self(**A_ )
_lowerCamelCase = outputs[0]
_lowerCamelCase = self.trainer.lr_schedulers[0]['''scheduler''']
_lowerCamelCase = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.hparams
_lowerCamelCase = processors[args.task]()
_lowerCamelCase = processor.get_labels()
for mode in ["train", "dev"]:
_lowerCamelCase = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , A_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_lowerCamelCase = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
_lowerCamelCase = convert_examples_to_features(
A_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , A_ )
torch.save(A_ , A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ = False ) -> DataLoader:
"""simple docstring"""
_lowerCamelCase = '''dev''' if mode == '''test''' else mode
_lowerCamelCase = self._feature_file(A_ )
logger.info('''Loading features from cached file %s''' , A_ )
_lowerCamelCase = torch.load(A_ )
_lowerCamelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCamelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCamelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCamelCase = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCamelCase = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ , shuffle=A_ , )
def UpperCamelCase_ ( self , A_ , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCamelCase = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
_lowerCamelCase = self(**A_ )
_lowerCamelCase , _lowerCamelCase = outputs[:2]
_lowerCamelCase = logits.detach().cpu().numpy()
_lowerCamelCase = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase_ ( self , A_ ) -> tuple:
"""simple docstring"""
_lowerCamelCase = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
_lowerCamelCase = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCamelCase = np.argmax(A_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCamelCase = np.squeeze(A_ )
_lowerCamelCase = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_lowerCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
_lowerCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
_lowerCamelCase = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , A_ , A_ )}
_lowerCamelCase = dict(results.items() )
_lowerCamelCase = results
return ret, preds_list, out_label_list
def UpperCamelCase_ ( self , A_ ) -> dict:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._eval_end(A_ )
_lowerCamelCase = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase_ ( self , A_ ) -> dict:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._eval_end(A_ )
_lowerCamelCase = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=A_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=A_ , required=A_ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=A_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '''./results''' , F'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
| 718
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
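# A toy illustration (added sketch) of the split performed above: timm stores the
# attention input projection as one fused (3 * hidden_size, hidden_size) matrix, and
# the conversion slices it into query, key and value blocks, in that order.
_hidden = 4
_fused_qkv = torch.arange(3 * _hidden * _hidden ).reshape(3 * _hidden , _hidden )
_q = _fused_qkv[: _hidden, :]
_k = _fused_qkv[_hidden : _hidden * 2, :]
_v = _fused_qkv[-_hidden :, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)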
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase = dct.pop(__UpperCAmelCase )
_lowerCamelCase = val
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase = 8
# set labels if required
if not base_model:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase = 384
_lowerCamelCase = 1536
_lowerCamelCase = 12
_lowerCamelCase = 6
# load original model from torch hub
_lowerCamelCase = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
_lowerCamelCase = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
_lowerCamelCase = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase = ViTImageProcessor()
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(__UpperCAmelCase )
if base_model:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowerCamelCase = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
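# A hedged illustration (assumes torch) of the tolerance checks above: the ported
# model is expected to match the original only up to a small absolute tolerance.
def _demo_allclose_tolerance():
    import torch
    a = torch.tensor([1.0, 2.0])
    b = a + 5e-4  # small numerical drift between implementations
    assert torch.allclose(a, b, atol=1e-3)
    assert not torch.allclose(a, b, atol=1e-5)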
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 638
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = ['pixel_values']
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = 1 / 2_55 , A_ = True , A_ = None , A_ = True , **A_ , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_24}
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
_lowerCamelCase = crop_size if crop_size is not None else {'''height''': 2_56, '''width''': 2_56}
_lowerCamelCase = get_size_dict(A_ , param_name='''crop_size''' )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_center_crop
_lowerCamelCase = crop_size
_lowerCamelCase = do_flip_channel_order
def UpperCamelCase_ ( self , A_ , A_ , A_ = PIL.Image.BILINEAR , A_ = None , **A_ , ) -> np.ndarray:
"""simple docstring"""
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
_lowerCamelCase = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
"""simple docstring"""
_lowerCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(A_ , data_format=A_ )
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
_lowerCamelCase = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase = get_size_dict(A_ , param_name='''crop_size''' )
_lowerCamelCase = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(A_ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
_lowerCamelCase = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=A_ , scale=A_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCamelCase = [self.flip_channel_order(image=A_ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCamelCase = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> int:
"""simple docstring"""
_lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
_lowerCamelCase = target_sizes.numpy()
_lowerCamelCase = []
for idx in range(len(A_ ) ):
_lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
_lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
_lowerCamelCase = logits.argmax(dim=1 )
_lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
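# A minimal sketch (assumes torch) of the argmax step above: each pixel's class is
# the channel with the highest logit.
def _demo_logits_argmax():
    import torch
    logits = torch.zeros(1, 3, 2, 2)  # (batch, num_classes, height, width)
    logits[0, 2] = 1.0  # class 2 dominates every pixel
    semantic_map = logits.argmax(dim=1)
    assert semantic_map.shape == (1, 2, 2) and bool((semantic_map == 2).all())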
| 719
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
snake_case__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowerCamelCase = '''lm_head'''
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
_lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
_lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
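# A small self-contained sketch of the dotted-path walk used above: getattr is
# applied once per component of key.split('.'). The class names are hypothetical.
def _demo_attr_walk():
    class _Leaf:
        weight = "w"
    class _Node:
        leaf = _Leaf()
    pointer = _Node()
    for attribute in "leaf.weight".split("."):
        pointer = getattr(pointer, attribute)
    assert pointer == "w"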
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
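# A hedged sketch of the '*' substitution above: the layer index parsed from the
# fairseq parameter name is spliced into the HF key template.
def _demo_wildcard_key():
    mapped_key = "unispeech.encoder.layers.*.attention.k_proj"
    layer_index = "3"  # hypothetical index extracted from the source name
    assert mapped_key.replace("*", layer_index) == "unispeech.encoder.layers.3.attention.k_proj"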
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCamelCase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
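# A minimal sketch of the name parsing above: "conv_layers.<layer_id>.<type_id>..."
# yields the two integers used to route each convolutional weight.
def _demo_conv_name_parse():
    name = "conv_layers.0.2.weight".split("conv_layers.")[-1]
    items = name.split(".")
    assert (int(items[0]), int(items[1])) == (0, 2)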
@torch.no_grad()
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase = UniSpeechConfig.from_pretrained(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load_from_json(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(__UpperCAmelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 42
_lowerCamelCase = 43
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = WavaVecaPhonemeCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCAmelCase , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
_lowerCamelCase = UniSpeechForCTC(__UpperCAmelCase )
else:
_lowerCamelCase = UniSpeechForPreTraining(__UpperCAmelCase )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCamelCase = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
hf_unispeech.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 638
| 0
|
def __magic_name__( ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_lowerCamelCase = 6
_lowerCamelCase = 1
_lowerCamelCase = 1901
_lowerCamelCase = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCamelCase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
_lowerCamelCase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCamelCase = day - days_per_month[month - 2]
if month > 12:
year += 1
_lowerCamelCase = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
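# A quick self-check of the Gregorian leap-year rule used in the loop above.
def _demo_is_leap():
    def is_leap(year):
        return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
    assert is_leap(2000) and is_leap(1996) and not is_leap(1900)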
if __name__ == "__main__":
print(solution())
| 720
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638
| 0
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
snake_case__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
snake_case__ = []
snake_case__ = []
snake_case__ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
snake_case__ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'emoji': True,
},
}
]
snake_case__ = 0
for log in Path().glob('*.log'):
snake_case__ = 0
with open(log, 'r') as f:
for line in f:
snake_case__ = json.loads(line)
if line.get('nodeid', '') != "":
snake_case__ = line['nodeid']
if line.get('duration', None) is not None:
snake_case__ = f'''{line["duration"]:.4f}'''
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
snake_case__ = []
log.unlink()
snake_case__ = ''
snake_case__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
snake_case__ = []
snake_case__ = {}
for test in failed_tests:
snake_case__ = test[0].split('::')
snake_case__ = data[0].split('/')[-1]
if data[0] not in filesafailed:
snake_case__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
snake_case__ = [test[0] for test in failed_table]
snake_case__ = list(set(files))
# Count number of instances in failed_tests
snake_case__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
snake_case__ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
snake_case__ = 'Too many failed tests, please see the full report in the Action results.'
snake_case__ = len(err) + 10
snake_case__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
snake_case__ = 'No failed tests! 🤗'
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
snake_case__ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
snake_case__ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
snake_case__ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
snake_case__ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
snake_case__ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
snake_case__ = row[0]
else:
snake_case__ = ''
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
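# A hedged sketch of the 3000-character truncation above: trim the message and
# append the fallback note, leaving headroom for the wrapper characters.
def _demo_truncate_message(message, err='Too many failed tests, please see the full report in the Action results.'):
    if len(message) > 3000:
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    return message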
| 721
|
import argparse
import json
import subprocess
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = []
_lowerCamelCase = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_lowerCamelCase = subprocess.run(__UpperCAmelCase , shell=__UpperCAmelCase , stdout=subprocess.PIPE )
_lowerCamelCase = output.stdout.decode('''utf-8''' )
_lowerCamelCase = json.loads(__UpperCAmelCase )
_lowerCamelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__UpperCAmelCase )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
_lowerCamelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
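# A hedged sketch of the filtering above on a toy payload shaped like the GitHub
# runners API response.
def _demo_filter_offline():
    runners = [{"name": "r1", "status": "offline"}, {"name": "r2", "status": "online"}]
    target_runners = {"r1", "r2"}
    offline = [r for r in runners if r["name"] in target_runners and r["status"] == "offline"]
    assert [r["name"] for r in offline] == ["r1"]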
if __name__ == "__main__":
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
return values.split(''',''' )
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 638
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = RobertaTokenizer
A_ = RobertaTokenizerFast
A_ = True
A_ = {'cls_token': '<s>'}
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase = {'''unk_token''': '''<unk>'''}
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A_ ) )
def UpperCamelCase_ ( self , **A_ ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = '''lower newer'''
_lowerCamelCase = '''lower newer'''
return input_text, output_text
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase = '''lower newer'''
_lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_lowerCamelCase = tokenizer.tokenize(A_ ) # , add_prefix_space=True)
self.assertListEqual(A_ , A_ )
_lowerCamelCase = tokens + [tokenizer.unk_token]
_lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=A_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=A_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.tokenizer_class.from_pretrained('''roberta-base''' )
_lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=A_ )
_lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A_ )
_lowerCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=A_ , add_prefix_space=A_ )
_lowerCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=A_ , add_prefix_space=A_ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = '''Encode this sequence.'''
_lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A_ , A_ )
_lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A_ , A_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A_ , A_ )
# Testing spaces after special tokens
_lowerCamelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A_ , lstrip=A_ , rstrip=A_ )} ) # mask token has a left space
_lowerCamelCase = tokenizer.convert_tokens_to_ids(A_ )
_lowerCamelCase = '''Encode <mask> sequence'''
_lowerCamelCase = '''Encode <mask>sequence'''
_lowerCamelCase = tokenizer.encode(A_ )
_lowerCamelCase = encoded.index(A_ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A_ , A_ )
_lowerCamelCase = tokenizer.encode(A_ )
_lowerCamelCase = encoded.index(A_ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(A_ , **A_ )
_lowerCamelCase = '''A, <mask> AllenNLP sentence.'''
_lowerCamelCase = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
_lowerCamelCase = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
A_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
A_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , A_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , A_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , A_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase = F'{text_of_1_token} {text_of_1_token}'
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ) + 1, 1 + len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
_lowerCamelCase = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
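# A small self-contained sketch of the offset arithmetic the assertions above
# verify: in "hello hello", the second token starts one space after the first.
def _demo_offset_arithmetic():
    token = "hello"
    text = F"{token} {token}"
    second = (len(token) + 1, len(token) + 1 + len(token))
    assert text[second[0] : second[1]] == token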
| 700
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 638
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def UpperCamelCase_ ( A_ ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
raise NotImplementedError()
| 701
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = 0
while b > 0:
if b & 1:
_lowerCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
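# A self-contained sketch of the add-and-shift (Russian peasant) idea implemented
# above; note both helpers share the obfuscated name __magic_name__, so the second
# definition shadows the first.
def _demo_shift_add_multiply():
    a, b, res = 7, 13, 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    assert res == 7 * 13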
| 638
| 0
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = '▁'
snake_case__ = {'vocab_file': 'prophetnet.tokenizer'}
snake_case__ = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
snake_case__ = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
snake_case__ = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase = collections.OrderedDict()
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader:
_lowerCamelCase = reader.readlines()
for index, token in enumerate(__UpperCAmelCase ):
_lowerCamelCase = token.rstrip('''\n''' )
_lowerCamelCase = index
return vocab
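# A minimal sketch of the mapping built above: each stripped line of the vocab
# file maps to its zero-based index.
def _demo_vocab_mapping():
    lines = ["<pad>\n", "<s>\n", "hello\n"]
    vocab = {token.rstrip("\n"): index for index, token in enumerate(lines)}
    assert vocab == {"<pad>": 0, "<s>": 1, "hello": 2}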
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_="[SEP]" , A_="[SEP]" , A_="[SEP]" , A_="[UNK]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ) -> None:
"""simple docstring"""
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
_lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_lowerCamelCase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
_lowerCamelCase = F'[unused{i}]'
_lowerCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_lowerCamelCase = 12
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(A_ )
def __getstate__( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = d
try:
import sentencepiece as spm
except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
_lowerCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def UpperCamelCase_ ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self , A_ ) -> Tuple:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = ''''''.join(A_ ).replace(A_ , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def UpperCamelCase_ ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
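# A hedged sketch of the id conversion above: special tokens keep fixed ids,
# sentencepiece ids are shifted by the fairseq offset (12 here), and spm id 0
# falls back to the unknown-token id.
def _demo_fairseq_offset(fairseq_offset=12, unk_token_id=3):
    def to_id(spm_id):
        return spm_id + fairseq_offset if spm_id else unk_token_id
    assert to_id(5) == 17 and to_id(0) == unk_token_id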
| 702
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __magic_name__( __UpperCAmelCase ) -> str:
'''simple docstring'''
_lowerCamelCase = model.config
_lowerCamelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCamelCase = MBartConfig(
is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , add_cross_attention=__UpperCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=__UpperCAmelCase , add_final_layer_norm=__UpperCAmelCase , )
return encoder_config, decoder_config
def __magic_name__( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "encoder.model" in name:
_lowerCamelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_lowerCamelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''encoder.layernorm.bias'''
return name
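# A hedged walk-through of the rename chain above on one representative key.
def _demo_rename_example():
    name = "encoder.model.patch_embed.proj.weight"
    name = name.replace("encoder.model", "encoder")
    name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    assert name == "encoder.embeddings.patch_embeddings.projection.weight"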
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = int(key_split[5] )
_lowerCamelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
_lowerCamelCase , _lowerCamelCase = get_configs(__UpperCAmelCase )
_lowerCamelCase = DonutSwinModel(__UpperCAmelCase )
_lowerCamelCase = MBartForCausalLM(__UpperCAmelCase )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
_lowerCamelCase = original_model.state_dict()
_lowerCamelCase = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
_lowerCamelCase = load_dataset('''hf-internal-testing/example-documents''' )
_lowerCamelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
_lowerCamelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCamelCase = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
_lowerCamelCase = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = '''When is the coffee break?'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCamelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCamelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _lowerCamelCase = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCamelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCamelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_lowerCamelCase = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_lowerCamelCase = original_model.encoder.model.patch_embed(__UpperCAmelCase )
_lowerCamelCase , _lowerCamelCase = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
# verify encoder hidden states
_lowerCamelCase = original_model.encoder(__UpperCAmelCase )
_lowerCamelCase = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 )
# verify decoder hidden states
_lowerCamelCase = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
_lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
snake_case__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 638
| 0
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> float:
'''simple docstring'''
_lowerCamelCase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
_lowerCamelCase = 1 - (matter_density + radiation_density + dark_energy)
_lowerCamelCase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_lowerCamelCase = hubble_constant * e_a ** (1 / 2)
return hubble
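# A hedged sanity check of the Friedmann relation above: when the relative
# densities sum to one (zero curvature), E(z=0) = 1 and the Hubble parameter at
# z = 0 reduces to the Hubble constant.
def _demo_hubble_today():
    hubble_constant, radiation, matter = 68.3, 1e-4, 0.3
    dark_energy = 1 - radiation - matter
    curvature = 1 - (matter + radiation + dark_energy)
    e_squared = radiation + matter + curvature + dark_energy  # every (z+1) power is 1 at z = 0
    assert abs(hubble_constant * e_squared ** 0.5 - hubble_constant) < 1e-12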
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
snake_case__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 703
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 0
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case__ = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = UNetaDModel
A_ = 'sample'
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 4
_lowerCamelCase = 3
_lowerCamelCase = (32, 32)
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
_lowerCamelCase = torch.tensor([10] ).to(A_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
_lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = UNetaDModel
A_ = 'sample'
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = 4
_lowerCamelCase = 4
_lowerCamelCase = (32, 32)
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
_lowerCamelCase = torch.tensor([10] ).to(A_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
_lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(A_ )
_lowerCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=A_ )
model.to(A_ )
_lowerCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=A_ )
model_accelerate.to(A_ )
model_accelerate.eval()
_lowerCamelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCamelCase = noise.to(A_ )
_lowerCamelCase = torch.tensor([10] * noise.shape[0] ).to(A_ )
_lowerCamelCase = model_accelerate(A_ , A_ )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowerCamelCase , _lowerCamelCase = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=A_ , low_cpu_mem_usage=A_ )
model_normal_load.to(A_ )
model_normal_load.eval()
_lowerCamelCase = model_normal_load(A_ , A_ )['''sample''']
assert torch_all_close(A_ , A_ , rtol=1E-3 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(A_ )
_lowerCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCamelCase = noise.to(A_ )
_lowerCamelCase = torch.tensor([10] * noise.shape[0] ).to(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , A_ ).sample
_lowerCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowerCamelCase = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-3 ) )
class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = UNetaDModel
A_ = 'sample'
@property
def UpperCamelCase_ ( self , A_=(32, 32) ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = 4
_lowerCamelCase = 3
_lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
_lowerCamelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=A_ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
_lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(A_ )
_lowerCamelCase = self.dummy_input
_lowerCamelCase = floats_tensor((4, 3) + (2_56, 2_56) ).to(A_ )
_lowerCamelCase = noise
_lowerCamelCase = model(**A_ )
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(A_ )
_lowerCamelCase = 4
_lowerCamelCase = 3
_lowerCamelCase = (2_56, 2_56)
_lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A_ )
_lowerCamelCase = torch.tensor(batch_size * [1E-4] ).to(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , A_ ).sample
_lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCamelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-2 ) )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(A_ )
_lowerCamelCase = 4
_lowerCamelCase = 3
_lowerCamelCase = (32, 32)
_lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A_ )
_lowerCamelCase = torch.tensor(batch_size * [1E-4] ).to(A_ )
with torch.no_grad():
_lowerCamelCase = model(A_ , A_ ).sample
_lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCamelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-2 ) )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
pass
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = vocab_size - 1
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase = True
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = GPTNeoXModel(config=A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ )
_lowerCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = GPTNeoXModel(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = GPTNeoXForCausalLM(config=A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.num_labels
_lowerCamelCase = GPTNeoXForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.num_labels
_lowerCamelCase = GPTNeoXForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.num_labels
_lowerCamelCase = GPTNeoXForTokenClassification(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = GPTNeoXForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
_lowerCamelCase = model(A_ , attention_mask=A_ , use_cache=A_ )
_lowerCamelCase = outputs.past_key_values
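        # hedged note: each entry of `past_key_values` caches one layer's key
        # and value projections, so the follow-up pass below only has to run
        # attention for the newly appended tokens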
        # create hypothetical multiple next tokens and extend next_input_ids with them
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask accordingly
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase = model(A_ , attention_mask=A_ , output_hidden_states=A_ )
_lowerCamelCase = output_from_no_past['''hidden_states'''][0]
_lowerCamelCase = model(
A_ , attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
A_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
A_ = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = GPTNeoXModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=A_ , hidden_size=64 , num_attention_heads=8 )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowerCamelCase = None
self.model_tester.create_and_check_model_as_decoder(A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A_ , A_ , A_ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A_ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCamelCase_ ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
_lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase = GPTNeoXModel(A_ )
original_model.to(A_ )
original_model.eval()
_lowerCamelCase = original_model(A_ ).last_hidden_state
_lowerCamelCase = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase = {'''type''': scaling_type, '''factor''': 10.0}
_lowerCamelCase = GPTNeoXModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
_lowerCamelCase = scaled_model(A_ ).last_hidden_state
_lowerCamelCase = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ , A_ , atol=1E-5 ) )
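# Hedged reference sketch of what "linear" RoPE scaling does (illustrative,
# not the exact transformers implementation):
#
#   scaled_positions = position_ids.float() / rope_scaling["factor"]
#   freqs = torch.outer(scaled_positions, inv_freq)  # then cos/sin as usual
#
# "dynamic" (NTK-style) scaling instead leaves inputs at or below the original
# max_position_embeddings untouched and only rescales the rotary base for
# longer inputs, which is exactly the behaviour the branches above assert.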
@require_torch
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
_lowerCamelCase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A_ )
_lowerCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
        # The hub repo was updated on 2023-04-04, resulting in poor outputs.
        # See: https://github.com/huggingface/transformers/pull/24193
_lowerCamelCase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
_lowerCamelCase = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 )
_lowerCamelCase = tokenizer.batch_decode(A_ )[0]
self.assertEqual(A_ , A_ )
| 705
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 42
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , A_ = 6_55_36 , A_ = None , A_ = 2 , A_ = 2 , A_ = 0 , A_ = "fourier" , A_ = True , A_ = False , A_ = 0.0 , A_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A_ = "UNetMidBlock1D" , A_ = None , A_ = (32, 32, 64) , A_ = None , A_ = 8 , A_ = 1 , A_ = False , ) -> Dict:
"""simple docstring"""
super().__init__()
_lowerCamelCase = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A_ , log=A_ , flip_sin_to_cos=A_ )
_lowerCamelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A_ , downscale_freq_shift=A_ )
_lowerCamelCase = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase = block_out_channels[0] * 4
_lowerCamelCase = TimestepEmbedding(
in_channels=A_ , time_embed_dim=A_ , act_fn=A_ , out_dim=block_out_channels[0] , )
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
_lowerCamelCase = nn.ModuleList([] )
_lowerCamelCase = None
# down
_lowerCamelCase = in_channels
for i, down_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_down_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A_ )
# mid
_lowerCamelCase = get_mid_block(
A_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A_ , add_downsample=A_ , )
# up
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase = out_channels
else:
_lowerCamelCase = block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
_lowerCamelCase = output_channel
_lowerCamelCase = (
reversed_block_out_channels[i + 1] if i < len(A_ ) - 1 else final_upsample_channels
)
_lowerCamelCase = i == len(A_ ) - 1
_lowerCamelCase = get_up_block(
A_ , num_layers=A_ , in_channels=A_ , out_channels=A_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A_ )
_lowerCamelCase = output_channel
# out
_lowerCamelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_lowerCamelCase = get_out_block(
out_block_type=A_ , num_groups_out=A_ , embed_dim=block_out_channels[0] , out_channels=A_ , act_fn=A_ , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , A_ , A_ , A_ = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
_lowerCamelCase = timestep
if not torch.is_tensor(A_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(sample.device )
_lowerCamelCase = self.time_proj(A_ )
if self.config.use_timestep_embedding:
_lowerCamelCase = self.time_mlp(A_ )
else:
_lowerCamelCase = timestep_embed[..., None]
_lowerCamelCase = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase = ()
for downsample_block in self.down_blocks:
_lowerCamelCase , _lowerCamelCase = downsample_block(hidden_states=A_ , temb=A_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase = self.mid_block(A_ , A_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase = down_block_res_samples[-1:]
_lowerCamelCase = down_block_res_samples[:-1]
_lowerCamelCase = upsample_block(A_ , res_hidden_states_tuple=A_ , temb=A_ )
# 5. post-process
if self.out_block:
_lowerCamelCase = self.out_block(A_ , A_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A_ )
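# Hedged usage sketch for the 1-D UNet defined above (the class and attribute
# names follow this module's aliases, and the shapes are illustrative):
#
#   model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 65536)                        # (batch, channels, length)
#   out = model(sample, timestep=torch.tensor([10])).sample  # same shape back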
| 638
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
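    # e.g. with num_processes == 2: rank 0 gets tensor([1., 2.]) and
    # rank 1 gets tensor([3., 4.])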
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __magic_name__( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = create_tensor(__UpperCAmelCase )
_lowerCamelCase = gather(__UpperCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __magic_name__( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = [state.process_index]
_lowerCamelCase = gather_object(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == state.num_processes, F'{gathered_obj}, {len(__UpperCAmelCase )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __magic_name__( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = create_tensor(__UpperCAmelCase )
_lowerCamelCase = broadcast(__UpperCAmelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __magic_name__( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
_lowerCamelCase = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_lowerCamelCase = torch.arange(state.num_processes ).to(state.device )
_lowerCamelCase = pad_across_processes(__UpperCAmelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __magic_name__( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if state.num_processes != 2:
return
_lowerCamelCase = create_tensor(__UpperCAmelCase )
_lowerCamelCase = reduce(__UpperCAmelCase , '''sum''' )
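    # with 2 processes: rank 0 holds [1., 2.] and rank 1 holds [3., 4.],
    # so the elementwise sum is [4., 6.]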
_lowerCamelCase = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), F'{reduced_tensor} != {truth_tensor}'
def __magic_name__( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if state.num_processes != 2:
return
_lowerCamelCase = create_tensor(__UpperCAmelCase )
_lowerCamelCase = reduce(__UpperCAmelCase , '''mean''' )
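    # elementwise mean of [1., 2.] and [3., 4.] is [2., 3.]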
_lowerCamelCase = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), F'{reduced_tensor} != {truth_tensor}'
def __magic_name__( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
def __magic_name__( ) -> int:
'''simple docstring'''
_lowerCamelCase = PartialState()
state.print(F'State: {state}' )
state.print('''testing gather''' )
test_gather(__UpperCAmelCase )
state.print('''testing gather_object''' )
test_gather_object(__UpperCAmelCase )
state.print('''testing broadcast''' )
test_broadcast(__UpperCAmelCase )
state.print('''testing pad_across_processes''' )
test_pad_across_processes(__UpperCAmelCase )
state.print('''testing reduce_sum''' )
test_reduce_sum(__UpperCAmelCase )
state.print('''testing reduce_mean''' )
test_reduce_mean(__UpperCAmelCase )
if __name__ == "__main__":
main()
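# hedged usage note: this script is meant to run under several processes at
# once, e.g. `accelerate launch --num_processes 2 <this_script>.py`
# (the exact CLI invocation is illustrative)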
| 706
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse in Python using np.linspace()
snake_case__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining membership functions
    # (trimf() here; trapmf(), gbellmf(), gaussmf(), etc. are also available).
snake_case__ = [0, 25, 50]
snake_case__ = [25, 50, 75]
snake_case__ = fuzz.membership.trimf(X, abca)
snake_case__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case__ = np.ones(75)
snake_case__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A') = 1 - µA(x)
snake_case__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
snake_case__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
snake_case__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case__ = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
snake_case__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
snake_case__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
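    # The two composition labels above have no implementation in the original
    # script; below is a hedged NumPy sketch. Building the relations R and S
    # via an outer min is an illustrative assumption, not part of the source.
    R = np.fmin.outer(young, middle_aged)  # fuzzy relation young -> middle_aged
    S = np.fmin.outer(middle_aged, young)  # fuzzy relation middle_aged -> young
    # max-min composition: (R o S)[i, k] = max over j of min(R[i, j], S[j, k])
    maxmin_composition = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    # max-product composition: same reduction with min replaced by the product
    maxprod_composition = np.max(R[:, :, None] * S[None, :, :], axis=1)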
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 638
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class UpperCamelCase ( __lowercase , __lowercase ):
'''simple docstring'''
A_ = 'bit'
A_ = ['preactivation', 'bottleneck']
A_ = ['SAME', 'VALID']
def __init__( self , A_=3 , A_=64 , A_=[2_56, 5_12, 10_24, 20_48] , A_=[3, 4, 6, 3] , A_="preactivation" , A_="relu" , A_=None , A_=32 , A_=0.0 , A_=False , A_=32 , A_=1 , A_=None , A_=None , **A_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(**A_ )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase = global_padding.upper()
else:
raise ValueError(F'Padding strategy {global_padding} not supported' )
_lowerCamelCase = num_channels
_lowerCamelCase = embedding_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = layer_type
_lowerCamelCase = hidden_act
_lowerCamelCase = global_padding
_lowerCamelCase = num_groups
_lowerCamelCase = drop_path_rate
_lowerCamelCase = embedding_dynamic_padding
_lowerCamelCase = output_stride
_lowerCamelCase = width_factor
_lowerCamelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(A_ ) + 1 )]
_lowerCamelCase , _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
| 707
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger()
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
_lowerCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
        # check the length of the state_dict keys to see if we have learnable params
        return list(filter(lambda A_ : len(list(A_.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
'''simple docstring'''
A_ = 42
A_ = 42
A_ = 0
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def __call__( self , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = Tracker(self.dest )(A_ ).parametrized
_lowerCamelCase = Tracker(self.src )(A_ ).parametrized
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_lowerCamelCase = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(A_ )} operations while'
F' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
_lowerCamelCase = ResNetForImageClassification(__UpperCAmelCase ).eval()
_lowerCamelCase = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
_lowerCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
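    # one random batch is enough here: if every traced layer was copied
    # correctly, the two networks agree up to floating-point tolerance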
_lowerCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCAmelCase , )
        # we can reuse the ConvNeXt image processor here
_lowerCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = 1000
_lowerCamelCase = (1, num_labels)
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = num_labels
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
_lowerCamelCase = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
        'The name of the model you wish to convert; it must be one of the supported resnet* architectures,'
        ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
snake_case__ = parser.parse_args()
snake_case__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 638
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase = SwinvaConfig()
_lowerCamelCase = swinva_name.split('''_''' )
_lowerCamelCase = name_split[1]
if "to" in name_split[3]:
_lowerCamelCase = int(name_split[3][-3:] )
else:
_lowerCamelCase = int(name_split[3] )
if "to" in name_split[2]:
_lowerCamelCase = int(name_split[2][-2:] )
else:
_lowerCamelCase = int(name_split[2][6:] )
if model_size == "tiny":
_lowerCamelCase = 96
_lowerCamelCase = (2, 2, 6, 2)
_lowerCamelCase = (3, 6, 12, 24)
elif model_size == "small":
_lowerCamelCase = 96
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (3, 6, 12, 24)
elif model_size == "base":
_lowerCamelCase = 128
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (4, 8, 16, 32)
else:
_lowerCamelCase = 192
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (6, 12, 24, 48)
if "to" in swinva_name:
_lowerCamelCase = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_lowerCamelCase = 2_1841
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-22k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
else:
_lowerCamelCase = 1000
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = img_size
_lowerCamelCase = num_classes
_lowerCamelCase = embed_dim
_lowerCamelCase = depths
_lowerCamelCase = num_heads
_lowerCamelCase = window_size
return config
def __magic_name__( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
_lowerCamelCase = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
_lowerCamelCase = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
_lowerCamelCase = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
_lowerCamelCase = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
_lowerCamelCase = '''layernorm.weight'''
if name == "norm.bias":
_lowerCamelCase = '''layernorm.bias'''
if "head" in name:
_lowerCamelCase = name.replace('''head''' , '''classifier''' )
else:
_lowerCamelCase = '''swinv2.''' + name
return name
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(__UpperCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[1] )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
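            # timm stores query, key and value as one fused qkv matrix; split
            # it into three equal chunks for the HF per-projection layout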
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[
dim : dim * 2
]
_lowerCamelCase = val[-dim:]
else:
_lowerCamelCase = val
return orig_state_dict
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase )
timm_model.eval()
_lowerCamelCase = get_swinva_config(__UpperCAmelCase )
_lowerCamelCase = SwinvaForImageClassification(__UpperCAmelCase )
model.eval()
_lowerCamelCase = convert_state_dict(timm_model.state_dict() , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
_lowerCamelCase = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
_lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
_lowerCamelCase = timm_model(inputs['''pixel_values'''] )
_lowerCamelCase = model(**__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
model.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 708
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCamelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_lowerCamelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
# load decoder from hub
_lowerCamelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def UpperCamelCase_ ( self , **A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> Optional[Any]:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase_ ( self , **A_ ) -> int:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure the additional decoder kwargs passed to `from_pretrained` override the defaults
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self , A_=(2, 10, 16) , A_=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(A_ )
return np.random.rand(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def UpperCamelCase_ ( A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
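# A minimal sketch of the offset-to-time conversion exercised above: each CTC
# logit frame covers `inputs_to_logits_ratio` raw audio samples, so dividing by
# the sampling rate yields seconds per frame. The 320 / 16_000 defaults below
# are illustrative (typical for Wav2Vec2 base), not read from a real model.
def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    time_per_frame = inputs_to_logits_ratio / sampling_rate  # 0.02 s per frame here
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_per_frame,
            "end_time": d["end_offset"] * time_per_frame,
        }
        for d in word_offsets
    ]
print(offsets_to_times([{"word": "hello", "start_offset": 71, "end_offset": 83}]))
# -> start_time ~= 1.42 s, end_time ~= 1.66 s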
| 638
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
A_ = 't5'
A_ = ['past_key_values']
A_ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , A_=3_21_28 , A_=5_12 , A_=64 , A_=20_48 , A_=6 , A_=None , A_=8 , A_=32 , A_=1_28 , A_=0.1 , A_=1E-6 , A_=1.0 , A_="relu" , A_=True , A_=True , A_=0 , A_=1 , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_kv
_lowerCamelCase = d_ff
_lowerCamelCase = num_layers
_lowerCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCamelCase = num_heads
_lowerCamelCase = relative_attention_num_buckets
_lowerCamelCase = relative_attention_max_distance
_lowerCamelCase = dropout_rate
_lowerCamelCase = layer_norm_epsilon
_lowerCamelCase = initializer_factor
_lowerCamelCase = feed_forward_proj
_lowerCamelCase = use_cache
_lowerCamelCase = self.feed_forward_proj.split('''-''' )
_lowerCamelCase = act_info[-1]
_lowerCamelCase = act_info[0] == '''gated'''
if len(A_ ) > 1 and act_info[0] != "gated" or len(A_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCamelCase = '''gelu_new'''
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , **A_ , )
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
_lowerCamelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
_lowerCamelCase = '''past_encoder_sequence + sequence'''
_lowerCamelCase = {0: '''batch'''}
_lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
_lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='''inputs''' )
return common_inputs
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 13
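# A small standalone sketch of the `feed_forward_proj` parsing done in the T5
# config above: an optional "gated-" prefix selects the gated feed-forward
# variant, and the suffix names the activation. The function name is
# illustrative, not part of the library API.
def parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split("-")
    if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
        raise ValueError(f"{feed_forward_proj!r} is not a valid activation spec")
    return act_info[-1], act_info[0] == "gated"  # (dense_act_fn, is_gated_act)
assert parse_feed_forward_proj("relu") == ("relu", False)
assert parse_feed_forward_proj("gated-gelu") == ("gelu", True)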
| 709
|
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase = len(__UpperCAmelCase )
_lowerCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for every prefix of arr, a sum of zero (0) can be formed by not taking
    # any element, hence True
for i in range(arr_len + 1 ):
_lowerCamelCase = True
    # a non-zero sum can never be formed from the empty set, hence False
for i in range(1 , required_sum + 1 ):
_lowerCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
_lowerCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
_lowerCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
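# For readability, here is the same bottom-up subset-sum DP with descriptive
# names (a sketch; the function above implements identical logic):
def is_subset_sum(arr, required_sum):
    n = len(arr)
    # subset[i][j] is True iff some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # case: skip arr[i - 1]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]  # case: take it
    return subset[n][required_sum]
assert is_subset_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not is_subset_sum([3, 34, 4, 12, 5, 2], 30)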
| 638
| 0
|
from __future__ import annotations
from math import pi
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
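# Usage sketch of the same relationship, X_L = 2*pi*f*L, in standalone form
# (the name is illustrative; the function above adds the validation shown):
def inductive_reactance(inductance, frequency, reactance):
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    if frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    return {"reactance": 2 * pi * frequency * inductance}
print(inductive_reactance(35e-3, 1_000, 0))  # {'reactance': 219.911...} ohms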
| 710
|
from typing import List
import numpy as np
def __magic_name__( __UpperCAmelCase ) -> int:
'''simple docstring'''
_lowerCamelCase = {key: len(__UpperCAmelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase = max(lists_lengths.values() , default=0 )
return max(1 , __UpperCAmelCase )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[range]:
'''simple docstring'''
_lowerCamelCase = []
for group_idx in range(__UpperCAmelCase ):
_lowerCamelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase = range(__UpperCAmelCase , start + num_shards_to_add )
shards_indices_per_group.append(__UpperCAmelCase )
return shards_indices_per_group
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> List[dict]:
'''simple docstring'''
_lowerCamelCase = _number_of_shards_in_gen_kwargs(__UpperCAmelCase )
if num_shards == 1:
return [dict(__UpperCAmelCase )]
else:
_lowerCamelCase = _distribute_shards(num_shards=__UpperCAmelCase , max_num_jobs=__UpperCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCAmelCase ) )
]
def __magic_name__( __UpperCAmelCase ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __UpperCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
_lowerCamelCase = {len(__UpperCAmelCase ) for value in gen_kwargs.values() if isinstance(__UpperCAmelCase , __UpperCAmelCase )}
_lowerCamelCase = {}
for size in list_sizes:
_lowerCamelCase = list(range(__UpperCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase = dict(__UpperCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCamelCase = [value[i] for i in indices_per_size[len(__UpperCAmelCase )]]
return shuffled_kwargs
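# Sketch of the distribution rule used by _distribute_shards above: shards are
# split as evenly as possible, and the first `num_shards % max_num_jobs` groups
# receive one extra shard (descriptive names; same logic as above):
def distribute_shards(num_shards, max_num_jobs):
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups
assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(2, 5) == [range(0, 1), range(1, 2)]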
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case__ = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['PoolFormerFeatureExtractor']
snake_case__ = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
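# The module above (like the Whisper and MVP __init__ modules later in this
# file) uses transformers' lazy-import machinery: classes listed in
# _import_structure are only imported on first attribute access, and optional
# backends are probed in try/except so missing dependencies degrade gracefully.
# A simplified sketch of the idea (the real _LazyModule also handles
# submodules, __dir__, and pickling):
import importlib
from types import ModuleType
class SimpleLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, item):
        mapping = self.__dict__.get("_class_to_module", {})
        if item not in mapping:
            raise AttributeError(item)
        module = importlib.import_module("." + mapping[item], self.__name__)
        return getattr(module, item)  # resolved only on first access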
| 711
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
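# Sketch of the "shortest edge" resize rule that get_expected_values reproduces
# above: the shorter side is scaled to `shortest_edge` and the longer side
# follows proportionally (the real processor additionally caps the long side
# via size["longest_edge"], omitted here for brevity):
def shortest_edge_resize(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
assert shortest_edge_resize(400, 200) == (36, 18)
assert shortest_edge_resize(200, 400) == (18, 36)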
| 638
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
        # Center the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
        # Take the columns in reverse order (-1), then keep only the first `dimensions` of them
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 10.3923_0485], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
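# Quick standalone check of the PCA core used above: center the data, take the
# leading eigenvector of the covariance matrix, and project (illustrative data;
# the sign of the projection can flip depending on the eigensolver):
_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])  # 2 features x 4 samples
_centered = _features - _features.mean(axis=1, keepdims=True)
_cov = _centered @ _centered.T / _features.shape[1]
_eigvals, _eigvecs = np.linalg.eigh(_cov)
_top = _eigvecs[:, ::-1][:, :1]  # leading principal direction
print((_top.T @ _centered).round(3))  # 1-D coordinates along that direction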
| 712
|
import argparse
import json
from tqdm import tqdm
def __magic_name__( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__UpperCAmelCase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__UpperCAmelCase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__UpperCAmelCase , help='''where to store parsed gold_data_path file''' , )
_lowerCamelCase = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
_lowerCamelCase = json.load(__UpperCAmelCase )
for dpr_record in tqdm(__UpperCAmelCase ):
_lowerCamelCase = dpr_record['''question''']
_lowerCamelCase = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__UpperCAmelCase ) + '''\n''' )
if __name__ == "__main__":
main()
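# Illustration of the two line formats the script writes (hypothetical record):
_dpr_record = {
    "question": "who wrote hamlet",
    "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}],
}
_eval_line = _dpr_record["question"] + "\n"  # one question per line in evaluation_set
_gold_line = "\t".join(c["title"] for c in _dpr_record["positive_ctxs"]) + "\n"
assert _gold_line == "Hamlet\tWilliam Shakespeare\n"  # tab-joined titles in gold_data_path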
| 638
| 0
|
from __future__ import annotations
snake_case__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_ ) -> None:
"""simple docstring"""
_lowerCamelCase = graph
# mapping node to its parent in resulting breadth first tree
_lowerCamelCase = {}
_lowerCamelCase = source_vertex
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
_lowerCamelCase = {self.source_vertex}
_lowerCamelCase = None
_lowerCamelCase = [self.source_vertex] # first in first out queue
while queue:
_lowerCamelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(A_ )
_lowerCamelCase = vertex
queue.append(A_ )
def UpperCamelCase_ ( self , A_ ) -> str:
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
_lowerCamelCase = self.parent.get(A_ )
if target_vertex_parent is None:
_lowerCamelCase = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(A_ )
return self.shortest_path(A_ ) + F'->{target_vertex}'
if __name__ == "__main__":
snake_case__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
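# An equivalent, non-recursive path reconstruction over the same parent map
# (sketch; assumes the breadth-first search above has populated g.parent):
def shortest_path_iterative(g, target_vertex):
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(
                f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}"
            )
        path.append(parent)
    return "->".join(reversed(path))
print(shortest_path_iterative(g, "D"))  # same result as g.shortest_path("D")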
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
            print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
            wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
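# The loss in _get_CLIP_loss above is the log-ratio -log(pos) + log(neg):
# minimizing it pushes the image toward the positive prompts and away from the
# negative ones. A toy illustration with stand-in similarity values:
_pos_sim = torch.tensor([28.0])  # CLIP logit vs. positive prompts (illustrative)
_neg_sim = torch.tensor([12.0])  # CLIP logit vs. negative prompts (illustrative)
print(-torch.log(_pos_sim) + torch.log(_neg_sim))  # decreases as _pos_sim grows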
| 638
| 0
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> str:
'''simple docstring'''
if attention_mask is None:
_lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCamelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_lowerCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_lowerCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class UpperCamelCase :
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=16 , A_=2 , A_=4 , A_=4 , A_="relu" , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=20 , A_=2 , A_=1 , A_=0 , ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = decoder_layerdrop
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = eos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = bos_token_id
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = self.eos_token_id # Eos Token
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length,
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and, in turn,
        # in position_ids being off by num_pad_tokens in past input
_lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
_lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCamelCase = self.get_config()
_lowerCamelCase = prepare_mam_aaa_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = MaMaaaModel(config=A_ ).get_decoder().to(A_ ).eval()
_lowerCamelCase = inputs_dict['''input_ids''']
_lowerCamelCase = inputs_dict['''attention_mask''']
_lowerCamelCase = inputs_dict['''head_mask''']
# first forward pass
_lowerCamelCase = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
_lowerCamelCase , _lowerCamelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_lowerCamelCase = model(A_ , attention_mask=A_ )['''last_hidden_state''']
_lowerCamelCase = model(A_ , attention_mask=A_ , past_key_values=A_ )[
'''last_hidden_state'''
]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-2 ) )
def UpperCamelCase_ ( self , A_ , A_ ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = MaMaaaModel(config=A_ ).to(A_ ).eval()
_lowerCamelCase = model(**A_ )
_lowerCamelCase = outputs.encoder_last_hidden_state
_lowerCamelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = model.get_encoder()
encoder.save_pretrained(A_ )
_lowerCamelCase = MaMaaaEncoder.from_pretrained(A_ ).to(A_ )
_lowerCamelCase = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = model.get_decoder()
decoder.save_pretrained(A_ )
_lowerCamelCase = MaMaaaDecoder.from_pretrained(A_ ).to(A_ )
_lowerCamelCase = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=A_ , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
A_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
A_ = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
A_ = True
A_ = True
A_ = False
A_ = False
def UpperCamelCase_ ( self , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = MaMaaaModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
_lowerCamelCase , _lowerCamelCase = model_class.from_pretrained(A_ , output_loading_info=A_ )
self.assertEqual(info['''missing_keys'''] , [] )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A_ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_lowerCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(A_ , A_ ) )
if not self.is_encoder_decoder:
_lowerCamelCase = inputs['''input_ids''']
del inputs["input_ids"]
else:
_lowerCamelCase = inputs['''input_ids''']
_lowerCamelCase = inputs.get('''decoder_input_ids''' , A_ )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , A_ )
_lowerCamelCase = model.get_input_embeddings()
if not self.is_encoder_decoder:
_lowerCamelCase = wte(A_ )
else:
_lowerCamelCase = wte(A_ )
_lowerCamelCase = wte(A_ )
with torch.no_grad():
model(**A_ )[0]
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(A_ )
_lowerCamelCase = MaMaaaForConditionalGeneration(A_ ).eval().to(A_ )
if torch_device == "cuda":
model.half()
model.generate(A_ , attention_mask=A_ )
model.generate(num_beams=4 , do_sample=A_ , early_stopping=A_ , num_return_sequences=3 )
def __magic_name__( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return torch.tensor(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
snake_case__ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(A_ )
_lowerCamelCase = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
_lowerCamelCase = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
_lowerCamelCase = prepare_mam_aaa_inputs_dict(model.config , A_ , A_ )
with torch.no_grad():
_lowerCamelCase = model(**A_ )[0]
_lowerCamelCase = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , A_ )
# change to expected output here
_lowerCamelCase = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=A_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(A_ )
# change to intended input
_lowerCamelCase = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
_lowerCamelCase = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
_lowerCamelCase = prepare_mam_aaa_inputs_dict(model.config , A_ , A_ )
with torch.no_grad():
_lowerCamelCase = model(**A_ )[0]
_lowerCamelCase = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , A_ )
# change to expected output here
_lowerCamelCase = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=A_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=A_ ) )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(A_ )
_lowerCamelCase = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
_lowerCamelCase = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_lowerCamelCase = tokenizer(A_ , padding=A_ , return_tensors='''pt''' )
_lowerCamelCase = model.generate(
input_ids=dct['''input_ids'''].to(A_ ) , attention_mask=dct['''attention_mask'''].to(A_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
_lowerCamelCase = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
_lowerCamelCase = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=A_ , skip_special_tokens=A_ )
assert generated == expected_en
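# Sketch of the mask construction in prepare_mam_aaa_inputs_dict above: any
# position that is not the pad token may attend, so each mask is one `ne` call:
_pad_token_id = 1
_input_ids = torch.tensor([[5, 7, 9, _pad_token_id, _pad_token_id]])
print(_input_ids.ne(_pad_token_id))  # tensor([[ True,  True,  True, False, False]])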
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 638
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def __magic_name__( __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.mean(1 )
# Centralize the data of class i
_lowerCamelCase = data - column_reshape(__UpperCAmelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
_lowerCamelCase = features.mean(1 )
_lowerCamelCase = np.nan
for i in range(__UpperCAmelCase ):
_lowerCamelCase = features[:, labels == i]
_lowerCamelCase = data.shape[1]
_lowerCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCamelCase = device_data * np.dot(
column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase ) , (column_reshape(__UpperCAmelCase ) - column_reshape(__UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
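# The two helpers above are the within-class and between-class scatter matrices of
# Fisher's LDA: S_W = (1/N) * sum_i sum_{x in C_i} (x - mu_i)(x - mu_i)^T and
# S_B = (1/N) * sum_i n_i (mu_i - mu)(mu_i - mu)^T, computed on (n_features, n_samples)
# arrays where each column is one sample. A readable sketch of the same computation
# (initialising with zeros instead of the np.nan first-iteration trick used above):
import numpy as np


def within_class_scatter(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    scatter = np.zeros((features.shape[0], features.shape[0]))
    for i in range(classes):
        data = features[:, labels == i]
        centered = data - data.mean(axis=1, keepdims=True)
        scatter += centered @ centered.T
    return scatter / features.shape[1]


def between_class_scatter(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    overall_mean = features.mean(axis=1, keepdims=True)
    scatter = np.zeros((features.shape[0], features.shape[0]))
    for i in range(classes):
        data = features[:, labels == i]
        mean_diff = data.mean(axis=1, keepdims=True) - overall_mean
        scatter += data.shape[1] * (mean_diff @ mean_diff.T)
    return scatter / features.shape[1]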
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
if features.any():
_lowerCamelCase = features.mean(1 )
# Center the dataset
_lowerCamelCase = features - np.reshape(__UpperCAmelCase , (data_mean.size, 1) )
_lowerCamelCase = np.dot(__UpperCAmelCase , centered_data.T ) / features.shape[1]
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(__UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCamelCase = np.dot(filtered_eigenvectors.T , __UpperCAmelCase )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
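# The PCA above reduces to: center the data, form C = X_c X_c^T / N, take the top
# `dimensions` eigenvectors (np.linalg.eigh returns eigenvalues in ascending order,
# hence the [:, ::-1] reversal), and project. The same steps inline, on a made-up
# 3x5 dataset:
import numpy as np

features = np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])  # toy data
centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
_, eigenvectors = np.linalg.eigh(covariance)      # eigenvalues ascending
components = eigenvectors[:, ::-1][:, :2]         # top-2 principal directions
projected = components.T @ features               # shape (2, 5), as the function returns
print(projected.shape)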
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
_lowerCamelCase , _lowerCamelCase = eigh(
covariance_between_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , covariance_within_classes(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , )
_lowerCamelCase = eigenvectors[:, ::-1][:, :dimensions]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = np.linalg.svd(__UpperCAmelCase )
_lowerCamelCase = svd_matrix[:, 0:dimensions]
_lowerCamelCase = np.dot(filtered_svd_matrix.T , __UpperCAmelCase )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=__UpperCAmelCase )
logging.error('''Dataset empty''' )
raise AssertionError
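# The scipy.linalg.eigh(A, B) call above solves the *generalized* eigenproblem
# S_B v = lambda * S_W v, whose top eigenvectors maximise Fisher's criterion
# J(w) = (w^T S_B w) / (w^T S_W w); the follow-up SVD re-orthonormalises the selected
# directions. A small usage sketch with the scatter helpers sketched earlier; the ridge
# term is my addition, needed because S_W is singular for this collinear toy data:
import numpy as np
from scipy.linalg import eigh

features = np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
labels = np.array([0, 0, 0, 1, 1])
s_b = between_class_scatter(features, labels, classes=2)
s_w = within_class_scatter(features, labels, classes=2)
_, eigenvectors = eigh(s_b, s_w + 1e-8 * np.eye(3))  # generalized eigenproblem
direction = eigenvectors[:, ::-1][:, :1]             # best discriminant direction
print((direction.T @ features).shape)                # (1, 5)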
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCamelCase = np.array([0, 0, 0, 1, 1] )
_lowerCamelCase = 2
_lowerCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = linear_discriminant_analysis(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if isinstance(__UpperCAmelCase , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def __magic_name__( ) -> None:
'''simple docstring'''
_lowerCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCamelCase = 2
_lowerCamelCase = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(__UpperCAmelCase ) as error_info:
_lowerCamelCase = principal_component_analysis(__UpperCAmelCase , __UpperCAmelCase )
if not np.allclose(__UpperCAmelCase , __UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
| 0
|
import argparse
import logging
import os
import re
from typing import Any, Optional, Union
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
snake_case__ = logging.getLogger(__name__)
snake_case__ = tf.data.AUTOTUNE
def __magic_name__( ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=__UpperCAmelCase , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=__UpperCAmelCase , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=__UpperCAmelCase , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=__UpperCAmelCase , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=__UpperCAmelCase , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=__UpperCAmelCase , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=__UpperCAmelCase , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=__UpperCAmelCase , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=__UpperCAmelCase , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=__UpperCAmelCase , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=__UpperCAmelCase , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=__UpperCAmelCase , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=__UpperCAmelCase , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=__UpperCAmelCase , default=0.1_5 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=__UpperCAmelCase , help='''Model ID to upload to on the Hugging Face Hub.''' )
_lowerCamelCase = parser.parse_args()
return args
def __magic_name__( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
try:
if args.tpu_name:
_lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_lowerCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(__UpperCAmelCase )
tf.tpu.experimental.initialize_tpu_system(__UpperCAmelCase )
return tpu
def __magic_name__( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase = 0
for file in file_list:
_lowerCamelCase = file.split('''/''' )[-1]
_lowerCamelCase = re.search(r'''-\d+-(\d+)\.tfrecord''' , __UpperCAmelCase ).group(1 )
_lowerCamelCase = int(__UpperCAmelCase )
num_samples += sample_count
return num_samples
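# count_samples() assumes each TFRecord shard embeds its record count in the filename,
# matching "-<shard>-<num_samples>.tfrecord" (presumably how the companion
# prepare_tfrecord_shards.py script names its outputs). The filenames below are made up
# for illustration:
import re

files = ["gs://bucket/train-00000-04096.tfrecord", "gs://bucket/train-00001-04096.tfrecord"]
total = sum(
    int(re.search(r"-\d+-(\d+)\.tfrecord", f.split("/")[-1]).group(1)) for f in files
)
print(total)  # 8192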
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
_lowerCamelCase = count_samples(__UpperCAmelCase )
_lowerCamelCase = tf.data.Dataset.from_tensor_slices(__UpperCAmelCase )
if shuffle:
_lowerCamelCase = dataset.shuffle(len(__UpperCAmelCase ) )
_lowerCamelCase = tf.data.TFRecordDataset(__UpperCAmelCase , num_parallel_reads=__UpperCAmelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_lowerCamelCase = dataset.apply(tf.data.experimental.assert_cardinality(__UpperCAmelCase ) )
_lowerCamelCase = dataset.map(__UpperCAmelCase , num_parallel_calls=__UpperCAmelCase )
if shuffle:
assert shuffle_buffer_size is not None
_lowerCamelCase = dataset.shuffle(args.shuffle_buffer_size )
_lowerCamelCase = dataset.batch(__UpperCAmelCase , drop_remainder=__UpperCAmelCase )
_lowerCamelCase = dataset.map(__UpperCAmelCase , num_parallel_calls=__UpperCAmelCase )
_lowerCamelCase = dataset.prefetch(__UpperCAmelCase )
return dataset
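# prepare_dataset() above fixes a deliberate ordering: shuffle the *file list*, read the
# shards in parallel, assert the cardinality computed from filenames, decode, shuffle
# *samples*, batch, apply masking per batch, then prefetch. A minimal sketch of that
# shape on in-memory data (no TFRecords), just to show the map/shuffle/batch/prefetch
# order:
import tensorflow as tf

dataset = tf.data.Dataset.range(1_000)
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=tf.data.AUTOTUNE)  # "decode"
dataset = dataset.shuffle(buffer_size=256)
dataset = dataset.batch(32, drop_remainder=True)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
for batch in dataset.take(1):
    print(batch.shape)  # (32,)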
def __magic_name__( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
_lowerCamelCase = initialize_tpu(__UpperCAmelCase )
_lowerCamelCase = tf.distribute.TPUStrategy(__UpperCAmelCase )
else:
_lowerCamelCase = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
_lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer )
_lowerCamelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
_lowerCamelCase = tokenizer.vocab_size
_lowerCamelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
_lowerCamelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
_lowerCamelCase = count_samples(__UpperCAmelCase )
_lowerCamelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
_lowerCamelCase = steps_per_epoch * args.num_epochs
with strategy.scope():
_lowerCamelCase = TFAutoModelForMaskedLM.from_config(__UpperCAmelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
_lowerCamelCase , _lowerCamelCase = create_optimizer(
num_train_steps=__UpperCAmelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__UpperCAmelCase , metrics=['''accuracy'''] )
def decode_fn(__UpperCAmelCase ):
_lowerCamelCase = {
'''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__UpperCAmelCase , __UpperCAmelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
_lowerCamelCase = DataCollatorForLanguageModeling(
tokenizer=__UpperCAmelCase , mlm_probability=args.mlm_probability , mlm=__UpperCAmelCase , return_tensors='''tf''' )
def mask_with_collator(__UpperCAmelCase ):
# TF really needs an isin() function
_lowerCamelCase = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
_lowerCamelCase , _lowerCamelCase = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(__UpperCAmelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__UpperCAmelCase , )
return batch
_lowerCamelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
_lowerCamelCase = prepare_dataset(
__UpperCAmelCase , decode_fn=__UpperCAmelCase , mask_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase , shuffle=__UpperCAmelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
_lowerCamelCase = prepare_dataset(
__UpperCAmelCase , decode_fn=__UpperCAmelCase , mask_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase , shuffle=__UpperCAmelCase , )
_lowerCamelCase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__UpperCAmelCase ) )
model.fit(
__UpperCAmelCase , validation_data=__UpperCAmelCase , epochs=args.num_epochs , callbacks=__UpperCAmelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
snake_case__ = parse_args()
main(args)
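# Since TF has no isin(), mask_with_collator() above builds the special-tokens mask by
# OR-ing elementwise comparisons: padding positions (attention_mask == 0) and CLS/SEP
# positions are excluded from MLM masking. The same trick standalone (the token ids
# 101/102 are arbitrary example values, not tied to any particular tokenizer):
import tensorflow as tf

input_ids = tf.constant([[101, 2023, 2003, 102, 0]])
attention_mask = tf.constant([[1, 1, 1, 1, 0]])
cls_id, sep_id = 101, 102

special_tokens_mask = (
    ~tf.cast(attention_mask, tf.bool)
    | (input_ids == cls_id)
    | (input_ids == sep_id)
)
print(special_tokens_mask.numpy())  # [[ True False False  True  True]]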
| 716
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( DiffusionPipeline ):
'''simple docstring'''
A_ = ['vqvae']
def __init__( self , A_ , A_ , A_ , A_ , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 0 , A_ = 0 , A_ = None , A_ = 0 , A_ = None , A_ = None , A_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_lowerCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
_lowerCamelCase = noise
_lowerCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_ )
_lowerCamelCase = self.mel.audio_slice_to_image(A_ )
_lowerCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_lowerCamelCase = (input_image / 2_55) * 2 - 1
_lowerCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCamelCase = self.vqvae.encode(torch.unsqueeze(A_ , 0 ) ).latent_dist.sample(
generator=A_ )[0]
_lowerCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1] )
_lowerCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCamelCase = int(mask_start_secs * pixels_per_second )
_lowerCamelCase = int(mask_end_secs * pixels_per_second )
_lowerCamelCase = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UNet2DConditionModel ):
_lowerCamelCase = self.unet(A_ , A_ , A_ )['''sample''']
else:
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
if isinstance(self.scheduler , DDIMScheduler ):
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
_lowerCamelCase = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_lowerCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCamelCase = 1 / self.vqvae.config.scaling_factor * images
_lowerCamelCase = self.vqvae.decode(A_ )['''sample''']
_lowerCamelCase = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_lowerCamelCase = (images * 2_55).round().astype('''uint8''' )
_lowerCamelCase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_lowerCamelCase = [self.mel.image_to_audio(A_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(A_ ) )
@torch.no_grad()
def UpperCamelCase_ ( self , A_ , A_ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(A_ )
_lowerCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCamelCase = (sample / 2_55) * 2 - 1
_lowerCamelCase = torch.Tensor(A_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_lowerCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCamelCase = self.scheduler.alphas_cumprod[t]
_lowerCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCamelCase = 1 - alpha_prod_t
_lowerCamelCase = self.unet(A_ , A_ )['''sample''']
_lowerCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
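# Note on the encode() loop above: the deterministic (eta = 0) DDIM update
#     x_{t-1} = sqrt(abar_{t-1}) * (x_t - sqrt(1 - abar_t) * eps) / sqrt(abar_t)
#               + sqrt(1 - abar_{t-1}) * eps
# is invertible, so iterating the timesteps in *reverse* and solving for x_t,
#     x_t = sqrt(abar_t) * (x_{t-1} - sqrt(1 - abar_{t-1}) * eps) / sqrt(abar_{t-1})
#           + sqrt(1 - abar_t) * eps,
# recovers the noise latent that would regenerate a given spectrogram image
# (abar_t = alphas_cumprod[t]). This is the standard DDIM-inversion identity, stated
# here for reference; it is what the three sample-update lines implement.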
@staticmethod
def UpperCamelCase_ ( x0 , x1 , alpha ) -> torch.Tensor:
"""simple docstring"""
theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
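# The slerp static method above interpolates along the great circle between two noise
# tensors; unlike linear interpolation, this keeps intermediate points at Gaussian-like
# norms instead of shrinking them toward the origin. A standalone numpy sketch of the
# same formula (not the pipeline's torch implementation):
import numpy as np


def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    """Spherical interpolation: alpha=0 gives x0, alpha=1 gives x1."""
    cos_theta = np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)


rng = np.random.default_rng(0)
a, b = rng.standard_normal(8), rng.standard_normal(8)
print(np.allclose(slerp(a, b, 0.0), a), np.allclose(slerp(a, b, 1.0), b))  # True True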
| 638
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
snake_case__ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
snake_case__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717
|
import os
import tempfile
import unittest
from typing import Any, Tuple
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng"
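# The @cached_property above means the temporary directory and TatoebaConverter are
# only constructed when a test actually touches self.resolver, with the result cached
# on the instance. A generic sketch of the pattern using functools.cached_property
# (assuming the transformers helper behaves like the stdlib version; that equivalence
# is an assumption, not the same code):
import tempfile
from functools import cached_property


class ExpensiveFixture:
    @cached_property
    def workdir(self) -> str:
        # Created on first access, then cached on the instance.
        return tempfile.mkdtemp()


fixture = ExpensiveFixture()
print(fixture.workdir == fixture.workdir)  # True: the same directory both times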
| 638
| 0
|