| code (string, 87 to 55.2k chars) | code_codestyle (int64, 0 to 349) | style_context (string, 135 to 49.1k chars) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 or 1) |
|---|---|---|---|---|
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
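
# A minimal sketch of how such an availability check can be implemented.
# This is an assumption for illustration; the real `is_rich_available`
# lives in the package's own `imports` module and may differ.
import importlib.util


def is_rich_available() -> bool:
    # True when the `rich` package can be imported in this environment.
    return importlib.util.find_spec("rich") is not None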
| code_codestyle: 39 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if `word` is stored in the Trie, False otherwise."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Print every word stored below `node`; `word` is the prefix so far."""
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
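
# Usage sketch (added for illustration, using only the names defined above):
# `print_words` walks the trie depth-first, so words come out grouped by
# shared prefix. Insertion and lookup are O(len(word)) -- one hop per char.
demo_root = TrieNode()
demo_root.insert_many(["car", "cart", "cat"])
print_words(demo_root, "")  # prints: car cart cat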
| style_context_codestyle: 39 | label: 1 |
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
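
# Worked check of the two functions above (a sanity sketch added for this
# writeup; not part of the original conversion script): at t = 0.5,
# sigma = sin(pi/4)**2 = 0.5 and alpha = sqrt(3)/2, so
# atan2(1/2, sqrt(3)/2) = pi/6 and the schedule maps 0.5 to (pi/6)/pi*2 = 1/3.
assert torch.isclose(get_crash_schedule(torch.tensor(0.5)), torch.tensor(1 / 3))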
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
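
# Usage sketch for `rename` (added for illustration): most keys map to a
# single diffusers-style name, but attention qkv projections fan out into a
# list of three names, which is why callers must handle both return types.
# The timestep-embedding case below follows directly from the first branch.
assert rename("timestep_embed.weight") == "time_proj.weight"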
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
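
# Standalone sketch of the qkv split above (added for illustration; the key
# names here are hypothetical, chosen only for the demonstration): a 1x1
# Conv1d weight of shape [3*C, C, 1] becomes three [C, C] Linear weights,
# one each for query, key and value.
_demo = transform_conv_attns({}, ["q.weight", "k.weight", "v.weight"], torch.randn(24, 8, 1))
assert all(_demo[k].shape == (8, 8) for k in ("q.weight", "k.weight", "v.weight"))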
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
| code_codestyle: 39 |
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
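
# Usage sketch of the pipeline under test (added for illustration; the
# checkpoint name is a real public model, the output shown is the shape of
# the result, not exact scores):
# classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
# classifier("I love this movie", candidate_labels=["positive", "negative"])
# -> {"sequence": "I love this movie", "labels": [...], "scores": [...]}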
| style_context_codestyle: 39 | label: 1 |
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| code_codestyle: 39 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
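
# Usage sketch for `get_size_checksum_dict` (added for illustration, with a
# hypothetical file path): reading in 1 MiB chunks (1 << 20 bytes) keeps
# memory flat even for very large files. For a file containing b"hello":
# get_size_checksum_dict("/tmp/hello.bin")
# -> {"num_bytes": 5,
#     "checksum": "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"}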
def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| style_context_codestyle: 39 | label: 1 |
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
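
# Usage sketch (added for illustration; the datasets and the
# post-processing callable here are hypothetical placeholders): the two
# extra constructor arguments carry the raw examples and the span-recovery
# step that plain `Trainer` has no slot for.
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_features,          # tokenized features
#     eval_examples=eval_examples,         # raw examples, for answer recovery
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()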
| code_codestyle: 39 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| style_context_codestyle: 39 | label: 1 |
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 39 |
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| style_context_codestyle: 39 | label: 1 |
def __A ( __lowerCAmelCase = 100 )-> int:
"""simple docstring"""
_UpperCAmelCase = (n * (n + 1) // 2) ** 2
_UpperCAmelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
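# Brute-force cross-check of the closed forms used in solution() above (an
# independent helper, for illustration): sum the series directly and compare.
def solution_brute_force(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares

assert solution_brute_force(10) == 2640
assert solution_brute_force(100) == solution(100) == 25164150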
| 39
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
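# A standalone sketch of the lazy-import machinery registered above (this is
# NOT the transformers._LazyModule implementation, just the core idea):
# attribute access resolves the submodule on demand and caches the result, so
# importing the package stays cheap until a symbol is actually used.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        module = importlib.import_module(self._class_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# demo with stdlib modules: json is imported only when the attribute is read
lazy = _LazyModuleSketch('demo', {'json': ['JSONDecoder'], 'collections': ['OrderedDict']})
assert lazy.JSONDecoder.__name__ == 'JSONDecoder'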
| 39
| 1
|
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers' , 'layers' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('subsample' , 'conv' )] = s_dict.pop(key )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size , vocab_size , bias=False )
    lin_layer.weight.data = emb.weight.data
return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
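# Toy illustration (assumed sizes, standalone) of the embedding/LM-head tying
# that make_linear_from_emb performs above: the output projection reuses the
# decoder embedding matrix instead of learning a second vocab-sized matrix.
import torch
from torch import nn

embed = nn.Embedding(num_embeddings=10, embedding_dim=4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight = embed.weight  # shared storage, not a copy
assert lm_head.weight.data_ptr() == embed.weight.data_ptr()
assert lm_head(torch.randn(1, 4)).shape == (1, 10)  # hidden state -> vocab logits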
| 39
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers , dest_layers , layers_to_copy )-> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"""{len(dest_layers )} != {len(layers_to_copy )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student , n_teacher )-> List[int]:
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                F""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise(n_student , n_teacher )-> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
        return list(range(n_teacher ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __A ( __lowerCAmelCase , __lowerCAmelCase = "student" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , )-> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
_UpperCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}"""
_UpperCAmelCase = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
            teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
            F""" {save_path}""" )
        student.save_pretrained(save_path )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d , teacher_d )
try:
        if hasattr(
            teacher , 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
    student.config.init_metadata = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
    student.save_pretrained(save_path )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
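# The tables above are hand-tuned, but the underlying rule is "keep the first
# and last teacher layer and spread the rest evenly". A sketch of that
# heuristic (illustrative only -- the hardcoded tables occasionally differ,
# e.g. LAYERS_TO_COPY[12][4] is [0, 4, 8, 11]):
def spread_layers(n_teacher: int, n_student: int) -> list:
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return sorted({round(i * step) for i in range(n_student)})

assert spread_layers(12, 3) == [0, 6, 11]     # matches LAYERS_TO_COPY[12][3]
assert spread_layers(12, 4) == [0, 4, 7, 11]  # the table uses [0, 4, 8, 11]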
| 39
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
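# A tiny sketch of how the attribute_map above behaves (standalone, not the
# PretrainedConfig implementation): reading cfg.hidden_size resolves to
# cfg.dim, so DistilBERT keeps its historical field names while still exposing
# the common config attribute names.
class _ConfigSketch:
    attribute_map = {'hidden_size': 'dim', 'num_hidden_layers': 'n_layers'}

    def __init__(self, dim=768, n_layers=6):
        self.dim = dim
        self.n_layers = n_layers

    def __getattr__(self, name):
        # only called when normal lookup fails, i.e. for the mapped aliases
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = _ConfigSketch()
assert cfg.hidden_size == cfg.dim == 768
assert cfg.num_hidden_layers == 6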
| 39
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(model_name , pytorch_dump_folder_path , base_model=True ):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
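# Toy-sized illustration (assumed hidden size of 4) of the fused-qkv split
# performed in read_in_q_k_v above: timm stores one (3 * hidden, hidden)
# projection, while the HF ViT attention expects separate query / key / value
# matrices stacked in exactly that order.
hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = qkv_weight[:hidden, :]
k_w = qkv_weight[hidden : 2 * hidden, :]
v_w = qkv_weight[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)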
| 39
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ):
"""simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
def UpperCamelCase ( self ):
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self ):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self ):
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
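# Back-of-envelope sketch (plain PIL, not the LevitImageProcessor) of the
# resize-shortest-edge + center-crop combination the shape tests above
# exercise: whatever the input resolution, the output is crop_size.
from PIL import Image as _PILImage

img = _PILImage.new('RGB', (30, 20))
short = 18  # matches size={'shortest_edge': 18} above
scale = short / min(img.size)
resized = img.resize((round(img.width * scale), round(img.height * scale)))
left, top = (resized.width - 18) // 2, (resized.height - 18) // 2
assert resized.crop((left, top, left + 18, top + 18)).size == (18, 18)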
| 39
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module):
    """simple docstring"""
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function('hello' )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
        with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
raise ValueError('Oops, we had an error!' )
        with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
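# Minimal re-implementation sketch of the halving behaviour these tests pin
# down (NOT the accelerate source -- just the core retry loop): on an
# OOM-looking error the batch size is halved until the call succeeds or
# reaches zero.
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError('No executable batch size found, reached zero.')
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if 'CUDA out of memory' in str(err):
                        batch_size //= 2  # retry with half the batch
                    else:
                        raise
        return wrapper
    return decorator

@find_executable_batch_size_sketch(starting_batch_size=128)
def _train(batch_size):
    if batch_size > 8:
        raise RuntimeError('CUDA out of memory.')
    return batch_size

assert _train() == 8  # 128 -> 64 -> 32 -> 16 -> 8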
| 39
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image ):
    """simple docstring"""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
return image
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
return latents
@torch.no_grad()
    def __call__( self , image = None , strength = 0.8 , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , use_clipped_model_output = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
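# Numeric sketch of the strength -> timestep mapping in get_timesteps above
# (values assumed): with 50 inference steps and strength 0.8, denoising skips
# the first 10 scheduler timesteps and runs the last 40, i.e. higher strength
# means more noise is added and more denoising work is done.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
assert (init_timestep, t_start) == (40, 10)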
| 39
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase ( self ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
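# Back-of-envelope check (plain Python, no TF needed) of the shape contract
# create_and_check_model asserts above: ResNet downsamples by a total stride
# of 32, so the tester's 32x32 inputs end with a 1x1 spatial map carrying
# hidden_sizes[-1] channels.
batch_size, image_size, hidden_sizes = 3, 32, [10, 20, 30, 40]
expected = (batch_size, hidden_sizes[-1], image_size // 32, image_size // 32)
assert expected == (3, 40, 1, 1)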
| 39
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "unispeech"
def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=128 , UpperCAmelCase=16 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=0 , UpperCAmelCase=320 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=100 , UpperCAmelCase=256 , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=80 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=0.5 , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(UpperCAmelCase )
_UpperCAmelCase = list(UpperCAmelCase )
_UpperCAmelCase = list(UpperCAmelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_ctc_classes
_UpperCAmelCase = vocab_size
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = feat_quantizer_dropout
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# pretraining loss
_UpperCAmelCase = replace_prob
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
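# Quick check of the property above: the product of the conv strides is the
# feature extractor's total downsampling factor, so with the default strides
# (5, 2, 2, 2, 2, 2, 2) one output frame covers 320 input samples.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320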
| 39
|
def circle_sort(collection: list )-> list:
    """simple docstring"""
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection: list , low: int , high: int ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
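# Non-interactive sanity checks for circle_sort (small inputs chosen by hand,
# including the already-handled empty and duplicate cases):
assert circle_sort([]) == []
assert circle_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert circle_sort([3, 3, 1]) == [1, 3, 3]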
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 39
| 1
|
def exchange_sort(numbers: list[int] )-> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
    return numbers
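# Non-interactive sanity checks for exchange_sort (an O(n^2) sort, so only
# small hand-picked inputs):
assert exchange_sort([]) == []
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 7, 0]) == [-1, 0, 7]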
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
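# Small illustration of the model_input_names merge above: dict.fromkeys
# deduplicates while keeping first-seen order, unlike set(). The field names
# below are illustrative, not necessarily the processor's actual inputs.
tok_names = ['input_ids', 'attention_mask']
img_names = ['flattened_patches', 'attention_mask']
assert list(dict.fromkeys(tok_names + img_names)) == ['input_ids', 'attention_mask', 'flattened_patches']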
| 39
| 1
|
class RadixNode :
"""simple docstring"""
def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ):
"""simple docstring"""
_UpperCAmelCase = {}
# A node will be a leaf if the tree contains its word
_UpperCAmelCase = is_leaf
_UpperCAmelCase = prefix
    def match( self , word ):
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words ):
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert( self , word ):
        """simple docstring"""
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        """simple docstring"""
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                return True
def UpperCamelCase ( self , UpperCAmelCase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = RadixNode()
root.insert_many(__lowerCAmelCase )
assert all(root.find(__lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
_UpperCAmelCase = RadixNode()
_UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowerCAmelCase )
print('Words:' , __lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
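# The class above is a style-transformed radix (compressed) trie. Its core
# step is prefix splitting, shown here as a standalone sketch (hypothetical
# helper mirroring the match logic of the original implementation): it
# returns the shared prefix plus the leftover node prefix and leftover word.
def _split_prefix(prefix, word):
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

# 'band' vs 'banana' share 'ban'; the node keeps 'd', the word keeps 'ana',
# so insertion must split the node (Case 4 above).
assert _split_prefix('band', 'banana') == ('ban', 'd', 'ana')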
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ):
"""simple docstring"""
_UpperCAmelCase = {}
# A node will be a leaf if the tree contains its word
_UpperCAmelCase = is_leaf
_UpperCAmelCase = prefix
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 0
for q, w in zip(self.prefix , UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.prefix == word:
_UpperCAmelCase = True
        # Case 2: The node has no edge starting with the word's first character
        # Solution: We create an edge from the current node to a new leaf
        # containing the whole word
elif word[0] not in self.nodes:
_UpperCAmelCase = RadixNode(prefix=UpperCAmelCase , is_leaf=UpperCAmelCase )
else:
_UpperCAmelCase = self.nodes[word[0]]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
            # Case 3: The node prefix equals the matching string
            # Solution: We insert the remaining word into the child node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase )
            # Case 4: The word only partially matches the node prefix
            # Solution: Split the node: create an intermediate node holding the
            # common prefix, then add the remaining word beneath it
else:
_UpperCAmelCase = remaining_prefix
_UpperCAmelCase = self.nodes[matching_string[0]]
_UpperCAmelCase = RadixNode(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = aux_node
if remaining_word == "":
_UpperCAmelCase = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_UpperCAmelCase = list(self.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
self.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_UpperCAmelCase = False
# If there is 1 edge, we merge it with its child
else:
_UpperCAmelCase = list(incoming_node.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
return True
def UpperCamelCase ( self , UpperCAmelCase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = RadixNode()
root.insert_many(__lowerCAmelCase )
assert all(root.find(__lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
_UpperCAmelCase = RadixNode()
_UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowerCAmelCase )
print('Words:' , __lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 39
| 1
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
_a = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
_a = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
_a = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_a = F'''down_blocks.{i}.resnets.{j}.'''
_a = F'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_a = F'''down_blocks.{i}.attentions.{j}.'''
_a = F'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_a = F'''up_blocks.{i}.resnets.{j}.'''
_a = F'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_a = F'''up_blocks.{i}.attentions.{j}.'''
_a = F'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_a = F'''down_blocks.{i}.downsamplers.0.conv.'''
_a = F'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_a = F'''up_blocks.{i}.upsamplers.0.'''
_a = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_a = '''mid_block.attentions.0.'''
_a = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_a = F'''mid_block.resnets.{j}.'''
_a = F'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
_UpperCAmelCase = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
_UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
_UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = v
_UpperCAmelCase = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
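# A small illustration of what the mapping above does (assumed key names,
# derived from the prefix pairs built in unet_conversion_map_layer): an HF
# Diffusers key is rewritten prefix-by-prefix into the original
# stable-diffusion layout, e.g. for down block 0, resnet 1:
hf_key = 'down_blocks.0.resnets.1.conv1.weight'
sd_key = hf_key.replace('down_blocks.0.resnets.1.', 'input_blocks.2.0.').replace('conv1', 'in_layers.2')
assert sd_key == 'input_blocks.2.0.in_layers.2.weight'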
# ================#
# VAE Conversion #
# ================#
_a = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_a = F'''encoder.down_blocks.{i}.resnets.{j}.'''
_a = F'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_a = F'''down_blocks.{i}.downsamplers.0.'''
_a = F'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_a = F'''up_blocks.{i}.upsamplers.0.'''
_a = F'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_a = F'''decoder.up_blocks.{i}.resnets.{j}.'''
_a = F'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_a = F'''mid_block.resnets.{i}.'''
_a = F'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
_a = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def __A ( __lowerCAmelCase )-> Optional[int]:
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
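# reshape_weight_for_sd maps a Linear weight of shape [out, in] onto a 1x1
# conv weight of shape [out, in, 1, 1]: SD's VAE attention uses convolutional
# projections where Diffusers uses linear ones. Quick sanity check:
import torch
w = torch.randn(8, 4)             # Diffusers linear projection weight
w_sd = w.reshape(*w.shape, 1, 1)  # same values, conv-shaped
assert w_sd.shape == (8, 4, 1, 1)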
def __A ( __lowerCAmelCase )-> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
_UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
_UpperCAmelCase = v.replace(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = v
_UpperCAmelCase = {v: vae_state_dict[k] for k, v in mapping.items()}
_UpperCAmelCase = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
_UpperCAmelCase = reshape_weight_for_sd(__lowerCAmelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
_a = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
_a = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_a = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_a = {'''q''': 0, '''k''': 1, '''v''': 2}
def __A ( __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
_UpperCAmelCase = k[: -len('.q_proj.weight' )]
_UpperCAmelCase = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
_UpperCAmelCase = [None, None, None]
_UpperCAmelCase = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
_UpperCAmelCase = k[: -len('.q_proj.bias' )]
_UpperCAmelCase = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
_UpperCAmelCase = [None, None, None]
_UpperCAmelCase = v
continue
        _UpperCAmelCase = textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(__lowerCAmelCase.group(0 ) )] , __lowerCAmelCase )
_UpperCAmelCase = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        _UpperCAmelCase = textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(__lowerCAmelCase.group(0 ) )] , __lowerCAmelCase )
_UpperCAmelCase = torch.cat(__lowerCAmelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        _UpperCAmelCase = textenc_pattern.sub(lambda __lowerCAmelCase : protected[re.escape(__lowerCAmelCase.group(0 ) )] , __lowerCAmelCase )
_UpperCAmelCase = torch.cat(__lowerCAmelCase )
return new_state_dict
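# The loop above fuses the separate q/k/v projections back into the single
# in_proj tensor that the original OpenCLIP checkpoint layout expects.
# A toy sketch of the concatenation (assumed q|k|v row layout, tiny shapes):
import torch
q, k, v = (torch.randn(4, 4) for _ in range(3))
in_proj_weight = torch.cat([q, k, v])  # shape [3 * hidden, hidden]
assert in_proj_weight.shape == (12, 4)
assert torch.equal(in_proj_weight[:4], q)  # q occupies the first block of rows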
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
_a = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_a = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
_a = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
_a = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load each model from safetensors when the file exists, otherwise fall back to the PyTorch .bin weights
if osp.exists(unet_path):
_a = load_file(unet_path, device='''cpu''')
else:
_a = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
_a = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
_a = load_file(vae_path, device='''cpu''')
else:
_a = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
_a = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
_a = load_file(text_enc_path, device='''cpu''')
else:
_a = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
_a = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
_a = convert_unet_state_dict(unet_state_dict)
_a = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_a = convert_vae_state_dict(vae_state_dict)
_a = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_a = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_a = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
_a = convert_text_enc_state_dict_vaa(text_enc_dict)
_a = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
_a = convert_text_enc_state_dict(text_enc_dict)
_a = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_a = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_a = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_a = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 39
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_a = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , *, # begin keyword-only arguments
UpperCAmelCase="<s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = len(self.symbols )
def __eq__( self , UpperCAmelCase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , UpperCAmelCase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = cls()
d.add_from_file(UpperCAmelCase )
return d
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(UpperCAmelCase )
self.count.append(UpperCAmelCase )
return idx
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return 0
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(UpperCAmelCase ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(' ' , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(UpperCAmelCase )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(UpperCAmelCase ) )
self.add_symbol(UpperCAmelCase , n=UpperCAmelCase , overwrite=UpperCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
    _UpperCAmelCase = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
_UpperCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
_UpperCAmelCase = d[k] # restore
return da
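# rewrite_dict_keys translates fairseq BPE conventions into the ones
# BioGptTokenizer expects: a trailing '@@' marks a word-internal piece and is
# dropped, while word-final pieces gain an explicit '</w>' marker. Toy check:
import re
def _to_hf_piece(token: str) -> str:
    return re.sub(r'@@$', '', token) if token.endswith('@@') else token + '</w>'
assert _to_hf_piece('hel@@') == 'hel'  # word-internal piece
assert _to_hf_piece('lo') == 'lo</w>'  # word-final piece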
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> str:
"""simple docstring"""
if not os.path.exists(__lowerCAmelCase ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'checkpoint.pt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location='cpu' )
_UpperCAmelCase = chkpt['cfg']['model']
# dicts
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'dict.txt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
_UpperCAmelCase = Dictionary.load(__lowerCAmelCase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'bpecodes' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'config.json' )
_UpperCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
_UpperCAmelCase = chkpt['model']
# remove unneeded keys
_UpperCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
else:
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('Conversion is done!' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 39
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
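# The import machinery above defers every heavy submodule import until first
# attribute access. A minimal standalone sketch of that pattern (hypothetical
# LazyModule; the real transformers._LazyModule also wires up __all__,
# module specs, and error handling):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = set(submodules)

    def __getattr__(self, item):
        if item in self._submodules:
            # the actual import only happens here, on first access
            return importlib.import_module(f'{self.__name__}.{item}')
        raise AttributeError(item)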
| 39
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
return "".join(sorted(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase )-> list[str]:
"""simple docstring"""
return word_by_signature[signature(__lowerCAmelCase )]
_a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
_a = sorted({word.strip().lower() for word in data.splitlines()})
_a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
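# A word's signature is its letters in sorted order, so two words are anagrams
# exactly when their signatures match. Standalone check of the idea (local
# helper, since the functions above carry style-transformed names):
def _sig(word):
    return ''.join(sorted(word))
assert _sig('listen') == _sig('silent') == 'eilnst'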
if __name__ == "__main__":
_a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 39
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
with open(UpperCAmelCase , encoding='utf-8' ) as input_file:
_UpperCAmelCase = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
_UpperCAmelCase = input_file.read()
_UpperCAmelCase = regexp.search(UpperCAmelCase )
return match
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
with open(UpperCAmelCase , encoding='utf-8' ) as input_file:
_UpperCAmelCase = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
_UpperCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCAmelCase = regexp.finditer(UpperCAmelCase )
_UpperCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = Path('./datasets' )
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCAmelCase ) ):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = Path('./datasets' )
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCAmelCase ) ):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 39
|
from __future__ import annotations
def __A ( __lowerCAmelCase )-> list[int]:
"""simple docstring"""
_UpperCAmelCase = 2
_UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCAmelCase )
if n > 1:
factors.append(__lowerCAmelCase )
return factors
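# Trial division sketch: factors come out in non-decreasing order, e.g.
# 360 = 2 * 2 * 2 * 3 * 3 * 5. (Standalone re-implementation for a quick
# check, since the function above carries style-transformed names.)
def _prime_factors(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
assert _prime_factors(360) == [2, 2, 2, 3, 3, 5]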
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 1
|
from math import pi, sqrt, tan
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
_UpperCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(__lowerCAmelCase , 2 ) * torus_radius * tube_radius
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
_UpperCAmelCase = (sidea + sidea + sidea) / 2
_UpperCAmelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
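# Heron's formula, as used above: with s = (a + b + c) / 2 the area is
# sqrt(s * (s - a) * (s - b) * (s - c)); for the 3-4-5 right triangle s = 6,
# giving sqrt(6 * 3 * 2 * 1) = 6.
from math import isclose
assert isclose(sqrt(6 * 3 * 2 * 1), 6.0)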
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 39
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __A ( )-> tuple[list[int], int]:
"""simple docstring"""
_UpperCAmelCase = [randint(-1_000 , 1_000 ) for i in range(10 )]
_UpperCAmelCase = randint(-5_000 , 5_000 )
return (arr, r)
_a = make_dataset()
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(__lowerCAmelCase , 3 ):
if sum(__lowerCAmelCase ) == target:
return tuple(sorted(__lowerCAmelCase ) )
return (0, 0, 0)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_UpperCAmelCase = len(__lowerCAmelCase )
for i in range(n - 1 ):
_UpperCAmelCase , _UpperCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
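# Why the second version is O(n^2): after sorting, each anchor index needs only
# one inward left/right sweep, advancing whichever pointer under- or overshoots
# the target. Standalone sketch with readable names:
def _triplet_sum_demo(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            if s < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
assert _triplet_sum_demo([1, 5, 8, 2, 7], 10) == (1, 2, 7)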
def __A ( )-> tuple[float, float]:
"""simple docstring"""
_UpperCAmelCase = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
_UpperCAmelCase = '\ntriplet_sum1(*dataset)\n'
_UpperCAmelCase = '\ntriplet_sum2(*dataset)\n'
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
return (min(__lowerCAmelCase ), min(__lowerCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 39
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_a = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 39
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
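# The replicate/shard pattern used throughout these tests: `replicate` copies
# the pipeline params to every local device, while `shard` adds a leading
# device axis to the inputs so the jit-compiled call runs one slice per device.
# Shape-only sketch (assumed semantics of flax's shard helper):
import numpy as np
num_devices = 8
prompt_ids = np.zeros((num_devices, 77), dtype=np.int32)  # one prompt per device
sharded = prompt_ids.reshape(num_devices, -1, 77)         # roughly what shard() yields
assert sharded.shape == (8, 1, 77)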
| 39
| 1
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_a = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
_a = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __A ( __lowerCAmelCase , __lowerCAmelCase=False )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = create_model(
'HTSAT-tiny' , 'roberta' , __lowerCAmelCase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=__lowerCAmelCase , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def __A ( __lowerCAmelCase )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = R'.*sequential.(\d+).*'
_UpperCAmelCase = R'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_UpperCAmelCase = key.replace(__lowerCAmelCase , __lowerCAmelCase )
if re.match(__lowerCAmelCase , __lowerCAmelCase ):
# replace sequential layers with list
_UpperCAmelCase = re.match(__lowerCAmelCase , __lowerCAmelCase ).group(1 )
_UpperCAmelCase = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__lowerCAmelCase )//3}.linear.""" )
elif re.match(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = int(re.match(__lowerCAmelCase , __lowerCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_UpperCAmelCase = 1 if projecton_layer == 0 else 2
_UpperCAmelCase = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_UpperCAmelCase = value
_UpperCAmelCase = mixed_qkv.size(0 ) // 3
_UpperCAmelCase = mixed_qkv[:qkv_dim]
_UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
_UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
_UpperCAmelCase = query_layer
_UpperCAmelCase = key_layer
_UpperCAmelCase = value_layer
else:
_UpperCAmelCase = value
return model_state_dict
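# Mirror of the qkv handling above: the CLAP audio encoder stores its attention
# projections as one fused qkv matrix, while HF's ClapModel expects separate
# query/key/value tensors. Toy check of the three-way split (assumed q|k|v layout):
import torch
fused = torch.randn(12, 4)  # [3 * hidden, hidden]
dim = fused.size(0) // 3
q, k, v = fused[:dim], fused[dim : 2 * dim], fused[2 * dim :]
assert q.shape == k.shape == v.shape == (4, 4)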
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = init_clap(__lowerCAmelCase , enable_fusion=__lowerCAmelCase )
clap_model.eval()
_UpperCAmelCase = clap_model.state_dict()
_UpperCAmelCase = rename_state_dict(__lowerCAmelCase )
_UpperCAmelCase = ClapConfig()
_UpperCAmelCase = enable_fusion
_UpperCAmelCase = ClapModel(__lowerCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
transformers_config.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
_a = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 39
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = ScoreSdeVeScheduler()
_UpperCAmelCase = ScoreSdeVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
sde_ve.to(UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCAmelCase ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCAmelCase , return_dict=UpperCAmelCase )[
0
]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'google/ncsnpp-church-256'
_UpperCAmelCase = UNetaDModel.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = ScoreSdeVeScheduler.from_pretrained(UpperCAmelCase )
_UpperCAmelCase = ScoreSdeVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
sde_ve.to(UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=UpperCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
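# --- Added usage sketch (illustration, not part of the original test file) ---
# The slow test above, reproduced through the public diffusers API. The model id
# and step count come from the test itself; running on 'cuda' is an assumption.
def _demo_score_sde_ve():
    import torch
    from diffusers import ScoreSdeVePipeline

    pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256').to('cuda')
    generator = torch.manual_seed(0)
    images = pipe(num_inference_steps=10, output_type='numpy', generator=generator).images
    return images  # shape (1, 256, 256, 3), matching the assertion above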
| 39
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
                logger.warning(
                    F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exist, no preloaded
                    speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded
                    voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
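# --- Added usage sketch (illustration, not part of the original file) ---
# The processor above mirrors the shape of transformers' BarkProcessor (a tokenizer
# plus an optional speaker-embeddings dict). Assuming that correspondence holds,
# typical use with the real library looks like this; the checkpoint and preset
# names are illustrative assumptions, not taken from the code above.
def _demo_processor():
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained('suno/bark-small')
    # voice_preset is resolved through the speaker-embeddings dict, as in __call__ above
    inputs = processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')
    return inputs  # a BatchFeature with padded input_ids and attention_mask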
| 39
| 1
|
from heapq import heappop, heappush
import numpy as np
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )-> tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = grid.shape
_UpperCAmelCase = [-1, 1, 0, 0]
_UpperCAmelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_UpperCAmelCase , _UpperCAmelCase = [(0, source)], set()
_UpperCAmelCase = np.full((rows, cols) , np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = np.empty((rows, cols) , dtype=__lowerCAmelCase )
_UpperCAmelCase = None
while queue:
((_UpperCAmelCase) , (_UpperCAmelCase)) = heappop(__lowerCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_UpperCAmelCase = []
while (x, y) != source:
path.append((x, y) )
_UpperCAmelCase , _UpperCAmelCase = predecessors[x, y]
path.append(__lowerCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__lowerCAmelCase ) ):
_UpperCAmelCase , _UpperCAmelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_UpperCAmelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__lowerCAmelCase , (dist + 1, (nx, ny)) )
_UpperCAmelCase = dist + 1
_UpperCAmelCase = (x, y)
return np.inf, []
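# --- Added sketch (illustration, not part of the original file) ---
# A de-obfuscated reading of the grid Dijkstra above, under the assumptions visible
# in the code: cells equal to 1 are walkable and every step costs 1. The name
# `grid_dijkstra` is hypothetical; it reuses the heapq/numpy imports at the top.
def grid_dijkstra(grid, source, destination, allow_diagonal=False):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                if grid[nx][ny] == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []

# Example: grid_dijkstra(np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]]), (0, 0), (2, 0))
# returns (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]).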
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=512 , UpperCAmelCase=False , UpperCAmelCase=6 , UpperCAmelCase=12 , UpperCAmelCase=768 , UpperCAmelCase=4 * 768 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=0.1 , UpperCAmelCase=0.2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = sinusoidal_pos_embds
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dim
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation
_UpperCAmelCase = initializer_range
_UpperCAmelCase = qa_dropout
_UpperCAmelCase = seq_classif_dropout
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
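# --- Added usage sketch (illustration, not part of the original file) ---
# The config above mirrors transformers' DistilBertConfig, including the
# attribute_map that aliases hidden_size / num_attention_heads / num_hidden_layers
# onto DistilBERT's native dim / n_heads / n_layers. With the real library:
def _demo_config():
    from transformers import DistilBertConfig

    config = DistilBertConfig(n_layers=6, n_heads=12, dim=768)
    assert config.hidden_size == config.dim == 768  # aliased via attribute_map
    assert config.num_hidden_layers == config.n_layers == 6
    return config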
| 39
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = []
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_init_end' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_train_begin' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_train_end' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_epoch_begin' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_epoch_end' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_step_begin' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_step_end' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_evaluate' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_predict' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_save' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_log' )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
self.events.append('on_prediction_step' )
@require_torch
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
def UpperCamelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def UpperCamelCase ( self , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=64 , UpperCAmelCase=64 , UpperCAmelCase=None , UpperCAmelCase=False , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = RegressionDataset(length=UpperCAmelCase )
_UpperCAmelCase = RegressionDataset(length=UpperCAmelCase )
_UpperCAmelCase = RegressionModelConfig(a=UpperCAmelCase , b=UpperCAmelCase )
_UpperCAmelCase = RegressionPreTrainedModel(UpperCAmelCase )
_UpperCAmelCase = TrainingArguments(self.output_dir , disable_tqdm=UpperCAmelCase , report_to=[] , **UpperCAmelCase )
return Trainer(
UpperCAmelCase , UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , callbacks=UpperCAmelCase , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
# Order doesn't matter
_UpperCAmelCase = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : cb.__name__ if isinstance(UpperCAmelCase , UpperCAmelCase ) else cb.__class__.__name__ )
_UpperCAmelCase = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : cb.__name__ if isinstance(UpperCAmelCase , UpperCAmelCase ) else cb.__class__.__name__ )
for cba, cba in zip(UpperCAmelCase , UpperCAmelCase ):
if isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(UpperCAmelCase , cba.__class__ )
elif not isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(cba.__class__ , UpperCAmelCase )
else:
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ['on_init_end', 'on_train_begin']
_UpperCAmelCase = 0
_UpperCAmelCase = len(trainer.get_eval_dataloader() )
_UpperCAmelCase = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(UpperCAmelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_trainer()
_UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_UpperCAmelCase = self.get_trainer(disable_tqdm=UpperCAmelCase )
_UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_UpperCAmelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(UpperCAmelCase )
expected_callbacks.remove(UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
_UpperCAmelCase = self.get_trainer()
_UpperCAmelCase = trainer.pop_callback(UpperCAmelCase )
self.assertEqual(cb.__class__ , UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
trainer.add_callback(UpperCAmelCase )
expected_callbacks.insert(0 , UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
# We can also add, pop, or remove by instance
_UpperCAmelCase = self.get_trainer()
_UpperCAmelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(UpperCAmelCase )
expected_callbacks.remove(UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
_UpperCAmelCase = self.get_trainer()
_UpperCAmelCase = trainer.callback_handler.callbacks[0]
_UpperCAmelCase = trainer.pop_callback(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
trainer.add_callback(UpperCAmelCase )
expected_callbacks.insert(0 , UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=UpperCAmelCase )
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
# Independent log/save/eval
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
_UpperCAmelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
# A bit of everything
_UpperCAmelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
_UpperCAmelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCAmelCase , self.get_expected_events(UpperCAmelCase ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
_UpperCAmelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(UpperCAmelCase ) in warn_mock.call_args[0][0]
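# --- Added sketch (illustration, not part of the original test file) ---
# The minimal shape of a custom callback like MyTestTrainerCallback above, using the
# public transformers TrainerCallback hooks with the same (args, state, control,
# **kwargs) signature these tests exercise.
def _demo_callback():
    from transformers import TrainerCallback

    class PrintingCallback(TrainerCallback):
        def on_epoch_end(self, args, state, control, **kwargs):
            # state.epoch and state.global_step are filled in by the Trainer loop
            print(F"""finished epoch {state.epoch} at step {state.global_step}""")

    return PrintingCallback()  # pass via Trainer(..., callbacks=[PrintingCallback()])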
| 39
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
_a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {'source': 'What is love ?', 'target': 'life'}
_UpperCAmelCase = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(UpperCAmelCase , F"""{split}.{field}""" ) , 'w' ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = "pytorch" ):
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'output' )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
_UpperCAmelCase = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
_UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'metrics.json' )
with open(UpperCAmelCase ) as f:
_UpperCAmelCase = json.load(UpperCAmelCase )
return result
@require_torch_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 39
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_a = False
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A painting of a squirrel eating a burger '
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase )
_UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = generator.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A painting of a squirrel eating a burger '
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_UpperCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
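# --- Added usage sketch (illustration, not part of the original test file) ---
# Plain text-to-image use of the pipeline exercised above. The model id, prompt and
# remove_unused_weights() call come from the tests; running on 'cuda' is an assumption.
def _demo_versatile_diffusion():
    import torch
    from diffusers import VersatileDiffusionTextToImagePipeline

    pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
        'shi-labs/versatile-diffusion', torch_dtype=torch.float16
    ).to('cuda')
    pipe.remove_unused_weights()  # drop the unused text_unet, as the first test does
    generator = torch.manual_seed(0)
    return pipe(
        'A painting of a squirrel eating a burger',
        generator=generator,
        guidance_scale=7.5,
        num_inference_steps=50,
    ).images[0]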
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {} # Mapping from char to TrieNode
_UpperCAmelCase = False
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
_UpperCAmelCase = TrieNode()
_UpperCAmelCase = curr.nodes[char]
_UpperCAmelCase = True
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
_UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
def _delete(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool:
if index == len(UpperCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_UpperCAmelCase = False
return len(curr.nodes ) == 0
_UpperCAmelCase = word[index]
_UpperCAmelCase = curr.nodes.get(UpperCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_UpperCAmelCase = _delete(UpperCAmelCase , UpperCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCAmelCase , 0 )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
if node.is_leaf:
print(__lowerCAmelCase , end=' ' )
for key, value in node.nodes.items():
print_words(__lowerCAmelCase , word + key )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = TrieNode()
root.insert_many(__lowerCAmelCase )
# print_words(root, "")
assert all(root.find(__lowerCAmelCase ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
print(str(__lowerCAmelCase ) , 'works!' if passes else 'doesn\'t work :(' )
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 39
| 1
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __A ( )-> Tuple:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCAmelCase = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , __lowerCAmelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __A ( )-> List[Any]:
"""simple docstring"""
assert _test_patching.open is open
_UpperCAmelCase = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , __lowerCAmelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __A ( )-> int:
"""simple docstring"""
_UpperCAmelCase = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , __lowerCAmelCase ):
pass
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , __lowerCAmelCase ) is None
with patch_submodule(_test_patching , 'len' , __lowerCAmelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = '__test_patch_submodule_start_and_stop_mock__'
_UpperCAmelCase = patch_submodule(_test_patching , 'open' , __lowerCAmelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __A ( )-> Optional[int]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCAmelCase = '__test_patch_submodule_successive_join__'
_UpperCAmelCase = '__test_patch_submodule_successive_dirname__'
_UpperCAmelCase = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , __lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.rename' , __lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , __lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , __lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.join' , __lowerCAmelCase ):
with patch_submodule(_test_patching , 'os.path.dirname' , __lowerCAmelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __A ( )-> Any:
"""simple docstring"""
_UpperCAmelCase = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , __lowerCAmelCase ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , __lowerCAmelCase ):
pass
| 39
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ZeroShotClassificationPipeline(
            model=UpperCAmelCase , tokenizer=UpperCAmelCase , candidate_labels=['politics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# No kwarg
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(UpperCAmelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier(UpperCAmelCase , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=UpperCAmelCase , )
self.run_entailment_id(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = zero_shot_classifier.model.config
_UpperCAmelCase = config.labelaid
_UpperCAmelCase = zero_shot_classifier.entailment_id
_UpperCAmelCase = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_UpperCAmelCase = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_UpperCAmelCase = original_labelaid
self.assertEqual(UpperCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
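# --- Added usage sketch (illustration, not part of the original test file) ---
# The zero-shot pattern covered by these tests, through the public pipeline()
# factory; the tiny checkpoint name is the one used in the fast tests above.
def _demo_zero_shot():
    from transformers import pipeline

    classifier = pipeline(
        'zero-shot-classification',
        model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
    )
    result = classifier(
        'Who are you voting for in 2020?',
        candidate_labels=['politics', 'public health', 'science'],
    )
    return result  # dict with 'sequence', 'labels' (sorted by score) and 'scores'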
| 39
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = StableDiffusionDiffEditPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase__ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ = frozenset([])
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
_UpperCAmelCase = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(UpperCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('RGB' )
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('RGB' )
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase )
_UpperCAmelCase = pipe(**UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase )
_UpperCAmelCase = self.pipeline_class.from_pretrained(UpperCAmelCase )
pipe_loaded.to(UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase , UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
_UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase )
_UpperCAmelCase = pipe_loaded(**UpperCAmelCase )[0]
_UpperCAmelCase = np.abs(output - output_loaded ).max()
self.assertLess(UpperCAmelCase , 1e-4 )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = self.get_dummy_mask_inputs(UpperCAmelCase )
_UpperCAmelCase = pipe.generate_mask(**UpperCAmelCase )
_UpperCAmelCase = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_UpperCAmelCase = np.array([0] * 9 )
_UpperCAmelCase = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = self.get_dummy_inversion_inputs(UpperCAmelCase )
_UpperCAmelCase = pipe.invert(**UpperCAmelCase ).images
_UpperCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_UpperCAmelCase = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
_UpperCAmelCase = DPMSolverMultistepScheduler(**UpperCAmelCase )
_UpperCAmelCase = DPMSolverMultistepInverseScheduler(**UpperCAmelCase )
_UpperCAmelCase = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = self.get_dummy_inversion_inputs(UpperCAmelCase )
_UpperCAmelCase = pipe.invert(**UpperCAmelCase ).images
_UpperCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_UpperCAmelCase = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase ( cls ):
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_UpperCAmelCase = raw_image.convert('RGB' ).resize((768, 768) )
_UpperCAmelCase = raw_image
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCAmelCase , torch_dtype=torch.float16 )
_UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
_UpperCAmelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'a bowl of fruit'
_UpperCAmelCase = 'a bowl of pears'
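# DiffEdit runs in three stages: (1) generate_mask contrasts the source and
# target prompts to localize the region to edit, (2) invert maps the input
# image to inpainting latents, and (3) the pipeline call denoises only inside
# the mask while preserving the rest of the image.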
_UpperCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
_UpperCAmelCase = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase ).latents
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_UpperCAmelCase = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCAmelCase , torch_dtype=torch.float16 )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_UpperCAmelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'a bowl of fruit'
_UpperCAmelCase = 'a bowl of pears'
_UpperCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
_UpperCAmelCase = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_UpperCAmelCase = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
_a = get_logger(__name__)
class __lowerCamelCase ( enum.Enum):
"""simple docstring"""
UpperCamelCase__ = "all_checks"
UpperCamelCase__ = "basic_checks"
UpperCamelCase__ = "no_checks"
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None )-> str:
"""simple docstring"""
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
_UpperCAmelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_UpperCAmelCase = ' for ' + verification_name if verification_name is not None else ''
if len(__lowerCAmelCase ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise UnexpectedSplits(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
_UpperCAmelCase = [
{'expected': expected_splits[name], 'recorded': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__lowerCAmelCase ) > 0:
raise NonMatchingSplitsSizesError(str(__lowerCAmelCase ) )
logger.info('All the splits matched successfully.' )
def __A ( __lowerCAmelCase , __lowerCAmelCase = True )-> dict:
"""simple docstring"""
if record_checksum:
_UpperCAmelCase = sha256()
with open(__lowerCAmelCase , 'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
m.update(__lowerCAmelCase )
_UpperCAmelCase = m.hexdigest()
else:
_UpperCAmelCase = None
return {"num_bytes": os.path.getsize(__lowerCAmelCase ), "checksum": checksum}
def __A ( __lowerCAmelCase )-> List[str]:
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
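# A minimal, self-contained sketch of the chunked-hashing technique the helper
# above relies on (the function name `size_and_checksum` is illustrative, not
# part of the library API): stream the file through SHA-256 in 1 MiB chunks so
# arbitrarily large downloads can be verified in constant memory.
import os
from hashlib import sha256

def size_and_checksum(path):
    digest = sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return {'num_bytes': os.path.getsize(path), 'checksum': digest.hexdigest()}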
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=UpperCAmelCase , speech_processor=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , feature_extractor=UpperCAmelCase , )
def UpperCamelCase ( self , UpperCAmelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
_UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.enable_attention_slicing(UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase=1_6000 , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = self.speech_processor.feature_extractor(
UpperCAmelCase , return_tensors='pt' , sampling_rate=UpperCAmelCase ).input_features.to(self.device )
_UpperCAmelCase = self.speech_model.generate(UpperCAmelCase , max_length=48_0000 )
_UpperCAmelCase = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , normalize=UpperCAmelCase )[
0
]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = 1
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = len(UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase , UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase )}.""" )
# get prompt text embeddings
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = text_embeddings.shape
_UpperCAmelCase = text_embeddings.repeat(1 , UpperCAmelCase , 1 )
_UpperCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCAmelCase = 42
if negative_prompt is None:
_UpperCAmelCase = [''] * batch_size
elif type(UpperCAmelCase ) is not type(UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase )} !="""
F""" {type(UpperCAmelCase )}.""" )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = [negative_prompt]
elif batch_size != len(UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
_UpperCAmelCase = negative_prompt
_UpperCAmelCase = text_input_ids.shape[-1]
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='pt' , )
_UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase = uncond_embeddings.shape[1]
_UpperCAmelCase = uncond_embeddings.repeat(1 , UpperCAmelCase , 1 )
_UpperCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_UpperCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_UpperCAmelCase = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device='cpu' , dtype=UpperCAmelCase ).to(
self.device )
else:
_UpperCAmelCase = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_UpperCAmelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
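# 0.18215 is the Stable Diffusion VAE's latent scaling factor; dividing by it
# undoes the scaling applied at encoding time before the latents are decoded.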
_UpperCAmelCase = 1 / 0.1_82_15 * latents
_UpperCAmelCase = self.vae.decode(UpperCAmelCase ).sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase )
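# Hedged usage sketch for the speech-to-image pipeline above: `pipe` stands in
# for an instance of the (anonymized) pipeline class with its Whisper and
# Stable Diffusion sub-models loaded, the keyword names follow the original,
# de-anonymized signature, and loading audio with soundfile is an assumption of
# this example rather than a requirement of the pipeline.
#
#   import soundfile as sf
#   import torch
#
#   speech, sampling_rate = sf.read('spoken_prompt.wav')  # 1-D float waveform
#   generator = torch.Generator(device='cuda').manual_seed(0)
#   output = pipe(speech, sampling_rate, num_inference_steps=50, generator=generator)
#   output.images[0].save('speech_to_image.png')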
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2", "stage3"] , UpperCAmelCase=[1, 2, 3] , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
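# Each of the len(depths) - 1 patch-merging stages quarters the token count and
# doubles the hidden size, which gives the expected output shape checked below.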
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCAmelCase ):
_UpperCAmelCase = 0
return t
def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={} ):
with torch.no_grad():
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
def recursive_check(UpperCAmelCase , UpperCAmelCase ):
if isinstance(UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}. Dict has"""
F""" `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}."""
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
@require_torch
class __lowerCamelCase ( unittest.TestCase , snake_case__):
"""simple docstring"""
UpperCamelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase__ = MaskFormerSwinConfig
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(UpperCAmelCase )
backbone.to(UpperCAmelCase )
backbone.eval()
_UpperCAmelCase = backbone(**UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
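# Hedged sketch of exercising the backbone the way these tests do: build a tiny
# config, run one batch of pixel values, and inspect the per-stage feature
# maps. The concrete config values are illustrative, mirroring the model
# tester's defaults above.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(
    image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1],
    num_heads=[2, 2, 4], window_size=2,
    out_features=['stage1', 'stage2', 'stage3'],
)
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 32, 32))
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one (batch, channels, height, width) map per stage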
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_a = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True )-> Optional[Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_UpperCAmelCase = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
_UpperCAmelCase = config_class.from_json_file(__lowerCAmelCase )
_UpperCAmelCase = True
_UpperCAmelCase = True
print(F"""Building TensorFlow model from configuration: {config}""" )
_UpperCAmelCase = model_class(__lowerCAmelCase )
# Resolve the PyTorch checkpoint (it may be a shortcut name to download from the hub)
if pytorch_checkpoint_path in aws_config_map.keys():
_UpperCAmelCase = cached_file(
__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_UpperCAmelCase = load_pytorch_checkpoint_in_tf2_model(__lowerCAmelCase , __lowerCAmelCase )
if compare_with_pt_model:
_UpperCAmelCase = tf_model(tf_model.dummy_inputs , training=__lowerCAmelCase ) # build the network
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location='cpu' )
_UpperCAmelCase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__lowerCAmelCase , config=__lowerCAmelCase , state_dict=__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = pt_model(**pt_model.dummy_inputs )
_UpperCAmelCase = pto[0].numpy()
_UpperCAmelCase = tfo[0].numpy()
_UpperCAmelCase = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, F"""Error, model absolute difference is >2e-2: {diff}"""
# Save the TensorFlow model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(__lowerCAmelCase , save_format='h5' )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , )-> Tuple:
"""simple docstring"""
if args_model_type is None:
_UpperCAmelCase = list(MODEL_CLASSES.keys() )
else:
_UpperCAmelCase = [args_model_type]
for j, model_type in enumerate(__lowerCAmelCase , start=1 ):
print('=' * 100 )
print(F""" Converting model type {j}/{len(__lowerCAmelCase )}: {model_type}""" )
print('=' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_UpperCAmelCase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_UpperCAmelCase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__lowerCAmelCase , __lowerCAmelCase ) , start=1 ):
print('-' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
_UpperCAmelCase = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(__lowerCAmelCase )}: {model_shortcut_name} - model_type {model_type}""" )
print('-' * 100 )
if config_shortcut_name in aws_config_map:
_UpperCAmelCase = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
else:
_UpperCAmelCase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_UpperCAmelCase = cached_file(__lowerCAmelCase , __lowerCAmelCase , force_download=not use_cached_models )
else:
_UpperCAmelCase = model_shortcut_name
if os.path.isfile(__lowerCAmelCase ):
_UpperCAmelCase = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=__lowerCAmelCase , pytorch_checkpoint_path=__lowerCAmelCase , config_file=__lowerCAmelCase , tf_dump_path=os.path.join(__lowerCAmelCase , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=__lowerCAmelCase , )
if remove_cached_files:
os.remove(__lowerCAmelCase )
os.remove(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
_a = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
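# Hedged usage sketch (the module path is an assumption; this script has
# historically shipped as transformers/convert_pytorch_checkpoint_to_tf2.py,
# and the flags below are the ones defined above):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json \
#       --tf_dump_path ./bert-tf_model.h5 \
#       --compare_with_pt_model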
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = '<unk> UNwanted , running'
_UpperCAmelCase = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(UpperCAmelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
_UpperCAmelCase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
_UpperCAmelCase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = len(UpperCAmelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
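# Hedged sketch of the behaviour the tests above pin down: TransfoXL's
# tokenizer protects numbers and hyphenated compounds with @,@ / @.@ / @-@
# markers. The checkpoint name is the canonical wikitext-103 one; running this
# requires network access.
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
print(tokenizer.tokenize("Henry's $5,000"))
# expected, per the test above: ['Henry', "'s", '$', '5', '@,@', '000']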
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __A ( __lowerCAmelCase )-> Dict:
"""simple docstring"""
if not is_accelerate_available():
return method
_UpperCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(__lowerCAmelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self , *__lowerCAmelCase , **__lowerCAmelCase ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *__lowerCAmelCase , **__lowerCAmelCase )
return wrapper
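# In diffusers this decorator ships as `apply_forward_hook`
# (diffusers.utils.accelerate_utils) and is applied to methods such as
# `AutoencoderKL.encode`/`decode`, so accelerate's offload hooks still fire
# when `forward()` is bypassed. A toy application, assuming the (anonymized)
# decorator above were importable under that name:
#
#   from torch import nn
#
#   class TinyAutoencoder(nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 0.5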
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
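# Hedged illustration of what the lazy structure above buys: importing the
# subpackage stays cheap, and the heavy backend-specific modeling code is only
# imported when the corresponding attribute is first accessed.
#
#   from transformers.models import opt   # fast: no torch import yet
#   model_cls = opt.OPTModel              # modeling_opt (and torch) load here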
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
_a = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
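# Keys in MAPPING translate fairseq module paths to their transformers
# counterparts: '*' is filled in with the encoder layer index at load time, and
# names in TOP_LEVEL_KEYS live on the model root instead of being prefixed
# with 'unispeech_sat.'.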
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('.' ):
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
_UpperCAmelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(__lowerCAmelCase )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , __lowerCAmelCase )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "bias" in name:
_UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = 'weight'
else:
_UpperCAmelCase = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]:
"""simple docstring"""
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
_UpperCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
_UpperCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True )-> int:
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = UniSpeechSatConfig.from_pretrained(__lowerCAmelCase )
else:
_UpperCAmelCase = UniSpeechSatConfig()
_UpperCAmelCase = ''
if is_finetuned:
_UpperCAmelCase = UniSpeechSatForCTC(__lowerCAmelCase )
else:
_UpperCAmelCase = UniSpeechSatForPreTraining(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_a = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 39
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
_UpperCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F"""{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_a = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_a = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
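# Standalone sketch (hypothetical depths, not from the original file) of how
# the tables above are consulted: a known (teacher, student) pair returns the
# hand-picked layers; anything else falls back to the first n_student layers.
_copy_table = {12: {3: [0, 6, 11]}} # excerpt of LAYERS_TO_COPY above
def _pick_demo(n_teacher , n_student ):
    try:
        return _copy_table[n_teacher][n_student]
    except KeyError:
        return list(range(n_student ) )
assert _pick_demo(12 , 3 ) == [0, 6, 11]
assert _pick_demo(10 , 2 ) == [0, 1] # no hardcoded mapping -> first layers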
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Dict:
"""simple docstring"""
try:
_UpperCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __A ( __lowerCAmelCase , __lowerCAmelCase = "student" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , )-> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
    _UpperCAmelCase = 'encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}"""
_UpperCAmelCase = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
_UpperCAmelCase = teacher.config_class(**__lowerCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
_UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_UpperCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
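# Hypothetical invocation sketch (script name, model name and paths are
# examples only); fire exposes the function above as a CLI entry point:
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3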
| 39
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = IFInpaintingPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCamelCase ( self ):
"""simple docstring"""
return self._get_dummy_components()
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCamelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCamelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 39
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase=False )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = ''
else:
_UpperCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
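# Minimal standalone sketch (hypothetical hidden size) of the slicing above:
# a fused (3*hidden, hidden) qkv projection is split row-wise into equal
# query / key / value blocks.
_hidden = 4
_qkv = torch.arange(3 * _hidden * _hidden , dtype=torch.float32 ).reshape(3 * _hidden , _hidden )
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert torch.equal(torch.cat([_q, _k, _v] ) , _qkv )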
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
_UpperCAmelCase = dct.pop(__lowerCAmelCase )
_UpperCAmelCase = val
def __A ( )-> str:
"""simple docstring"""
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCAmelCase = 8
# set labels if required
if not base_model:
_UpperCAmelCase = 1_000
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCAmelCase = 384
_UpperCAmelCase = 1_536
_UpperCAmelCase = 12
_UpperCAmelCase = 6
# load original model from torch hub
_UpperCAmelCase = torch.hub.load('facebookresearch/dino:main' , __lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
_UpperCAmelCase = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if base_model:
_UpperCAmelCase = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase ).eval()
else:
_UpperCAmelCase = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCAmelCase = ViTImageProcessor()
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_UpperCAmelCase = encoding['pixel_values']
_UpperCAmelCase = model(__lowerCAmelCase )
if base_model:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39
| 1
|
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
if number > 0:
raise ValueError('input must be a negative integer' )
_UpperCAmelCase = len(bin(__lowerCAmelCase )[3:] )
_UpperCAmelCase = bin(abs(__lowerCAmelCase ) - (1 << binary_number_length) )[3:]
_UpperCAmelCase = (
(
'1'
+ '0' * (binary_number_length - len(__lowerCAmelCase ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNormad(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_UpperCAmelCase , _UpperCAmelCase = mock_training_loop_function('hello' )
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase ):
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.cuda.memory_allocated()
_UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase )
_UpperCAmelCase = release_memory(UpperCAmelCase )
self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase )
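# Minimal usage sketch of the decorator exercised above (assuming accelerate's
# documented behavior: the wrapped function is retried with a halved batch
# size whenever it raises a CUDA out-of-memory error):
@find_executable_batch_size(starting_batch_size=128 )
def _training_sketch(batch_size ):
    # build the dataloader / model with `batch_size` here; an OOM triggers a
    # retry at batch_size // 2 until the call succeeds or the size reaches zero
    ...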
| 39
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCamelCase__ = "CIDAS/clipseg-rd64-refined"
UpperCamelCase__ = "image_segmenter"
UpperCamelCase__ = CLIPSegForImageSegmentation
UpperCamelCase__ = ["image", "text"]
UpperCamelCase__ = ["image"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase , return_tensors='pt' )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
with torch.no_grad():
_UpperCAmelCase = self.model(**UpperCAmelCase ).logits
return logits
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = outputs.cpu().detach().numpy()
_UpperCAmelCase = 0
_UpperCAmelCase = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
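# Hypothetical usage sketch (image path and label are examples only); the
# class above corresponds to transformers' image segmentation tool and is
# callable like any PipelineTool:
#   tool = <the class above>()
#   mask = tool(image=Image.open('cats.png' ), label='cat' ) # returns a PIL mask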
| 39
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
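# Standalone sketch of the integration check above (checkpoint name is an
# assumption; any TF ResNet image-classification checkpoint works the same way):
#   processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50' )
#   model = TFResNetForImageClassification.from_pretrained('microsoft/resnet-50' )
#   logits = model(**processor(images=prepare_img() , return_tensors='tf' ) ).logits # (1, 1000)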
| 39
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["pixel_values"]
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
_UpperCAmelCase = size if size is not None else {'shortest_edge': 256}
_UpperCAmelCase = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_UpperCAmelCase = get_size_dict(UpperCAmelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase = get_resize_output_image_size(UpperCAmelCase , size=size['shortest_edge'] , default_to_square=UpperCAmelCase )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = get_size_dict(UpperCAmelCase )
return center_crop(UpperCAmelCase , size=(size['height'], size['width']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(UpperCAmelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
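# Hypothetical usage sketch (the input image is an example): with the defaults
# above, an image is resized so its short edge is 256, center-cropped to
# 224x224, rescaled by 1/255 and normalized, yielding pixel_values of shape
# (1, 3, 224, 224):
#   batch = <the processor above>()(images=some_pil_image , return_tensors='pt' )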
| 39
|
def __A ( __lowerCAmelCase )-> list:
"""simple docstring"""
if len(__lowerCAmelCase ) < 2:
return collection
def circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
_UpperCAmelCase = False
if low == high:
return swapped
_UpperCAmelCase = low
_UpperCAmelCase = high
while left < right:
if collection[left] > collection[right]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right],
collection[left],
)
_UpperCAmelCase = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right + 1],
collection[left],
)
_UpperCAmelCase = True
_UpperCAmelCase = low + int((high - low) / 2 )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
return swapped or left_swap or right_swap
_UpperCAmelCase = True
while is_not_sorted is True:
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) - 1 )
return collection
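# Hand-computed check of the sort above (added for illustration; the entry
# point is exposed as __A in this dump):
assert __A([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]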
if __name__ == "__main__":
_a = input('''Enter numbers separated by a comma:\n''').strip()
_a = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 39
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '''▁'''
_a = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
_a = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
_a = {'''vinai/bartpho-syllable''': 1024}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = monolingual_vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCAmelCase = {}
_UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = cnt
cnt += 1
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
_UpperCAmelCase = line.strip().split()[0]
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ''.join(UpperCAmelCase ).replace(UpperCAmelCase , ' ' ).strip()
return out_string
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(UpperCAmelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
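# Standalone sketch (token ids are hypothetical) of the `<s> A </s></s> B </s>`
# pair layout built by the first UpperCamelCase method above
# (build_inputs_with_special_tokens in the upstream tokenizer):
_cls, _sep = [0], [2]
_ids_a, _ids_b = [10, 11], [12]
assert _cls + _ids_a + _sep + _sep + _ids_b + _sep == [0, 10, 11, 2, 2, 12, 2]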
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
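# Hypothetical usage sketch (checkpoint name is an example): for a non-VQA
# checkpoint the __call__ above returns image patches plus the tokenized text
# renamed to decoder inputs:
#   processor = <the processor above>.from_pretrained('google/pix2struct-textcaps-base' )
#   batch = processor(images=image , text='a caption' , return_tensors='pt' )
#   # -> 'flattened_patches', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask'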
| 39
| 1
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "x" , __lowerCAmelCase = 10**-10 , __lowerCAmelCase = 1 , )-> complex:
"""simple docstring"""
_UpperCAmelCase = symbols(__lowerCAmelCase )
_UpperCAmelCase = lambdify(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = lambdify(__lowerCAmelCase , diff(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(__lowerCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(__lowerCAmelCase ) / diff_function(
__lowerCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ):
"""simple docstring"""
_UpperCAmelCase = {}
# A node will be a leaf if the tree contains its word
_UpperCAmelCase = is_leaf
_UpperCAmelCase = prefix
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 0
for q, w in zip(self.prefix , UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.prefix == word:
_UpperCAmelCase = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_UpperCAmelCase = RadixNode(prefix=UpperCAmelCase , is_leaf=UpperCAmelCase )
else:
_UpperCAmelCase = self.nodes[word[0]]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase )
            # Case 4: The node prefix extends beyond the matching string
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_UpperCAmelCase = remaining_prefix
_UpperCAmelCase = self.nodes[matching_string[0]]
_UpperCAmelCase = RadixNode(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = aux_node
if remaining_word == "":
_UpperCAmelCase = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_UpperCAmelCase = list(self.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
self.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_UpperCAmelCase = False
# If there is 1 edge, we merge it with its child
else:
_UpperCAmelCase = list(incoming_node.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
return True
def UpperCamelCase ( self , UpperCAmelCase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = RadixNode()
root.insert_many(__lowerCAmelCase )
assert all(root.find(__lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
_UpperCAmelCase = RadixNode()
_UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowerCAmelCase )
print('Words:' , __lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
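# A compact usage sketch of the radix tree above, mirroring the calls the
# test function itself makes (RadixNode, insert_many, find and delete are
# the names that test code uses; everything else here is illustrative):
def radix_demo() -> None:
    root = RadixNode()
    root.insert_many('banana bananas bandana band'.split())
    assert root.find('banana')
    assert not root.find('ban')  # 'ban' is only a shared prefix, not an inserted word
    root.delete('banana')
    assert root.find('bananas')  # the longer word survives deletion of its prefix word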
| 39
| 1
|
# Algorithm for the pigeonhole sorting
def __A ( __lowerCAmelCase )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = min(__lowerCAmelCase ) # min() finds the minimum value
_UpperCAmelCase = max(__lowerCAmelCase ) # max() finds the maximum value
_UpperCAmelCase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_UpperCAmelCase = [0] * size
# Populate the pigeonholes.
for x in a:
        assert isinstance(x , int ), "integers only please"
holes[x - min_val] += 1
    # Put the elements back into the array in sorted order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
i += 1
def __A ( )-> Dict:
"""simple docstring"""
_UpperCAmelCase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__lowerCAmelCase )
    print('Sorted order is:' , ' '.join(map(str , __lowerCAmelCase ) ) )
if __name__ == "__main__":
main()
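# A self-contained sketch of the same pigeonhole sort with descriptive
# names (these identifiers are illustrative, not the ones used above):
def pigeonhole_sort_sketch(values):
    if not values:
        return values
    lo, hi = min(values), max(values)
    holes = [0] * (hi - lo + 1)  # one counting hole per possible value
    for v in values:
        holes[v - lo] += 1  # drop each value into its hole
    result = []
    for offset, count in enumerate(holes):
        result.extend([lo + offset] * count)  # read the holes back in order
    return result

assert pigeonhole_sort_sketch([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]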
| 39
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_a = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , *, # begin keyword-only arguments
UpperCAmelCase="<s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = len(self.symbols )
def __eq__( self , UpperCAmelCase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , UpperCAmelCase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = cls()
d.add_from_file(UpperCAmelCase )
return d
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(UpperCAmelCase )
self.count.append(UpperCAmelCase )
return idx
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return 0
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(UpperCAmelCase ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(' ' , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(UpperCAmelCase )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(UpperCAmelCase ) )
self.add_symbol(UpperCAmelCase , n=UpperCAmelCase , overwrite=UpperCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
_UpperCAmelCase = dict((re.sub(R'@@$' , '' , __lowerCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , __lowerCAmelCase ), v) for k, v in d.items() )
_UpperCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
_UpperCAmelCase = d[k] # restore
return da
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> str:
"""simple docstring"""
if not os.path.exists(__lowerCAmelCase ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'checkpoint.pt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location='cpu' )
_UpperCAmelCase = chkpt['cfg']['model']
# dicts
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'dict.txt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
_UpperCAmelCase = Dictionary.load(__lowerCAmelCase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'bpecodes' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'config.json' )
_UpperCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
_UpperCAmelCase = chkpt['model']
# remove unneeded keys
_UpperCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
else:
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('Conversion is done!' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
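# Example invocation of this conversion script (the script filename and
# both paths below are placeholders, not verified values):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/output
# The dump directory must contain checkpoint.pt, dict.txt and bpecodes,
# exactly the files the conversion function above checks for.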
| 39
| 1
|
def __A ( __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
_UpperCAmelCase = len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(__lowerCAmelCase ) , 'Postfix'.center(__lowerCAmelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowerCAmelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowerCAmelCase ) == 0:
stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowerCAmelCase ) # push x to stack
print(
x.center(8 ) , (''.join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , (''.join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=' | ' , ) # Output in tabular format
while len(__lowerCAmelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , (''.join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(__lowerCAmelCase ) # return Postfix as str
def __A ( __lowerCAmelCase )-> Tuple:
"""simple docstring"""
_UpperCAmelCase = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__lowerCAmelCase ) ):
if infix[i] == "(":
_UpperCAmelCase = ')' # change "(" to ")"
elif infix[i] == ")":
_UpperCAmelCase = '(' # change ")" to "("
return (infix_2_postfix(''.join(__lowerCAmelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_a = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
_a = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
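# Worked example of the reversal trick used above (illustrative input):
#   infix                               : (A+B)*C
#   reversed, with parentheses swapped  : C*(B+A)
#   postfix of that reversed expression : CBA+*
#   reversed once more -> prefix        : *+ABC
# So entering (A+B)*C should print *+ABC as the prefix form, along with
# the tabular shunting-yard trace produced by the functions above.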
| 39
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
return "".join(sorted(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase )-> list[str]:
"""simple docstring"""
return word_by_signature[signature(__lowerCAmelCase )]
_a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
_a = sorted({word.strip().lower() for word in data.splitlines()})
_a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
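# Minimal illustration of the signature-based grouping (a tiny word list
# instead of words.txt; the values shown are what sorted() produces):
#   signature('post') == signature('stop') == signature('tops') == 'opst'
# so all three words land in the same word_by_signature bucket and each
# is reported as an anagram of the others.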
| 39
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_a = ['''names''', '''prefix''']
_a = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_a = ['''encoding_errors''', '''on_bad_lines''']
_a = ['''date_format''']
@dataclass
class __lowerCamelCase ( datasets.BuilderConfig):
"""simple docstring"""
UpperCamelCase__ = ","
UpperCamelCase__ = None
UpperCamelCase__ = "infer"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = "."
UpperCamelCase__ = None
UpperCamelCase__ = '"'
UpperCamelCase__ = 0
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 0
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = None
UpperCamelCase__ = 1_0000
UpperCamelCase__ = None
UpperCamelCase__ = "strict"
UpperCamelCase__ = "error"
UpperCamelCase__ = None
def UpperCamelCase ( self ):
"""simple docstring"""
if self.delimiter is not None:
_UpperCAmelCase = self.delimiter
if self.column_names is not None:
_UpperCAmelCase = self.column_names
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # others are deprecated and must likewise be skipped when left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , UpperCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __lowerCamelCase ( datasets.ArrowBasedBuilder):
"""simple docstring"""
UpperCamelCase__ = CsvConfig
def UpperCamelCase ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase , (str, list, tuple) ):
_UpperCAmelCase = data_files
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_UpperCAmelCase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = [files]
_UpperCAmelCase = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.config.features is not None:
_UpperCAmelCase = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=UpperCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase = table_cast(UpperCAmelCase , UpperCAmelCase )
return pa_table
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ):
_UpperCAmelCase = pd.read_csv(UpperCAmelCase , iterator=UpperCAmelCase , dtype=UpperCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCAmelCase ):
_UpperCAmelCase = pa.Table.from_pandas(UpperCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase )}: {e}""" )
raise
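# Usage sketch (illustrative): loading a CSV through this builder with
#   from datasets import load_dataset
#   ds = load_dataset('csv', data_files='data.csv')
# routes through the table-generation method above, which reads the file
# in pandas chunks and yields Arrow tables cast to the requested features.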
| 39
|
from __future__ import annotations
def __A ( __lowerCAmelCase )-> list[int]:
"""simple docstring"""
_UpperCAmelCase = 2
_UpperCAmelCase = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCAmelCase )
if n > 1:
factors.append(__lowerCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
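# Worked example for the trial division above (illustrative): for n = 360,
# 360 = 2 * 2 * 2 * 3 * 3 * 5, so the function returns [2, 2, 2, 3, 3, 5].
# Factors appear with multiplicity and in non-decreasing order because each
# prime is divided out completely before the divisor is incremented.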
| 39
| 1
|
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = (boundary[1] - boundary[0]) / steps
_UpperCAmelCase = boundary[0]
_UpperCAmelCase = boundary[1]
_UpperCAmelCase = make_points(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = 0.0
y += (h / 2.0) * f(__lowerCAmelCase )
for i in x_i:
# print(i)
y += h * f(__lowerCAmelCase )
y += (h / 2.0) * f(__lowerCAmelCase )
return y
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = a + h
while x < (b - h):
yield x
_UpperCAmelCase = x + h
def __A ( __lowerCAmelCase )-> Optional[Any]: # enter your function here
"""simple docstring"""
_UpperCAmelCase = (x - 0) * (x - 0)
return y
def __A ( )-> int:
"""simple docstring"""
_UpperCAmelCase = 0.0 # Lower bound of integration
_UpperCAmelCase = 1.0 # Upper bound of integration
_UpperCAmelCase = 10.0 # define number of steps or resolution
_UpperCAmelCase = [a, b] # define boundary of integration
_UpperCAmelCase = method_a(__lowerCAmelCase , __lowerCAmelCase )
print(F"""y = {y}""" )
if __name__ == "__main__":
main()
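# Sanity check for the trapezoidal rule above: integrating f(x) = x^2 over
# [0, 1] with 10 steps gives roughly 0.335 against the exact value 1/3, and
# the error shrinks like O(h^2). A compact restatement of the same rule
# (names here are illustrative):
def trapezoid(f, a, b, steps):
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))  # endpoints carry half weight
    for k in range(1, steps):
        total += f(a + k * h)  # interior points carry full weight
    return h * total

assert abs(trapezoid(lambda x: x * x, 0.0, 1.0, 10) - 1 / 3) < 1e-2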
| 39
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __A ( )-> tuple[list[int], int]:
"""simple docstring"""
_UpperCAmelCase = [randint(-1_000 , 1_000 ) for i in range(10 )]
_UpperCAmelCase = randint(-5_000 , 5_000 )
return (arr, r)
_a = make_dataset()
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(__lowerCAmelCase , 3 ):
if sum(__lowerCAmelCase ) == target:
return tuple(sorted(__lowerCAmelCase ) )
return (0, 0, 0)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_UpperCAmelCase = len(__lowerCAmelCase )
for i in range(n - 1 ):
_UpperCAmelCase , _UpperCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __A ( )-> tuple[float, float]:
"""simple docstring"""
_UpperCAmelCase = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
_UpperCAmelCase = '\ntriplet_sum1(*dataset)\n'
_UpperCAmelCase = '\ntriplet_sum2(*dataset)\n'
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
return (min(__lowerCAmelCase ), min(__lowerCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
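# Why the two implementations differ (complexity note): triplet_sum1 tries
# every 3-permutation, O(n^3) work, while triplet_sum2 sorts once and runs
# a two-pointer sweep per anchor element, O(n^2) overall. For example, with
# arr = [1, 2, 4, 8] and target 14, the sorted sweep anchors on 2 and finds
# (2, 4, 8) without enumerating any permutations.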
| 39
| 1
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a = logging.get_logger(__name__)
enable_full_determinism()
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
model.to(UpperCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
model_accelerate.to(UpperCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
_UpperCAmelCase = model_accelerate(UpperCAmelCase , UpperCAmelCase )['sample']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
model_normal_load.to(UpperCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(UpperCAmelCase , UpperCAmelCase )['sample']
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(UpperCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 ) )
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel
UpperCamelCase__ = "sample"
@property
def UpperCamelCase ( self , UpperCAmelCase=(32, 32) ):
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(UpperCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(UpperCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
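# Usage sketch for the model class exercised above, following the calls
# the tests themselves make (the timestep value is illustrative):
#   model = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update')
#   noise = torch.randn(1, model.config.in_channels,
#                       model.config.sample_size, model.config.sample_size)
#   out = model(noise, 10).sample  # denoising prediction, same shape as the input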
| 39
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        _UpperCAmelCase = images_eff[2, 0, 256, 10:17, 1]  # slice the memory-efficient output, not the first run's images
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
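# Note on the sharding pattern repeated in every test above: parameters are
# replicate()d to all devices, prompt ids are shard()ed, and the pipeline is
# called with jit=True (pmap under the hood). num_samples equals
# jax.device_count() here, so each device renders one image per call and the
# returned arrays keep a (devices, per_device_batch, H, W, 3) layout; that is
# why the asserted shapes read (num_samples, 1, 512, 512, 3).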
| 39
| 1
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39
|
def __A ( __lowerCAmelCase )-> list:
"""simple docstring"""
if bit_count < 0:
        raise ValueError('The given input must be non-negative' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(__lowerCAmelCase )
#
# convert them to integers
for i in range(len(__lowerCAmelCase ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def __A ( __lowerCAmelCase )-> list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
    # 1 << n is equivalent to 2^n
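    # The reflect-and-prefix construction: prefix '0' to the (n-1)-bit sequence
    # in order, then prefix '1' to it in reverse order; e.g. ["0", "1"] becomes
    # ["00", "01", "11", "10"].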
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '0' + smaller_sequence[i]
sequence.append(__lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '1' + smaller_sequence[i]
sequence.append(__lowerCAmelCase )
return sequence
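# A minimal cross-check, not part of the original module (the helper name
# _gray_code_direct is illustrative): the i-th value of the n-bit Gray code can
# also be computed directly as i ^ (i >> 1), matching the recursive
# construction above.
def _gray_code_direct(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]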
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
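    # (explanatory note, not in the original) the values above are the expected
    # ndarray ranks: the semantic prompt is a 1-D token sequence, while the
    # coarse and fine prompts are 2-D (codebooks x timesteps) arrays.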
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
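        # (explanatory note, not in the original) when a voice preset is given,
        # it travels with the tokenized text; upstream Bark stores it on the
        # returned BatchFeature under the "history_prompt" key.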
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
| 39
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 39
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
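    # (explanatory note, not in the original) attribute_map lets the generic
    # names used across the library (hidden_size, num_attention_heads,
    # num_hidden_layers) resolve to DistilBERT's native fields (dim, n_heads,
    # n_layers).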
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=512 , UpperCAmelCase=False , UpperCAmelCase=6 , UpperCAmelCase=12 , UpperCAmelCase=768 , UpperCAmelCase=4 * 768 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=0.1 , UpperCAmelCase=0.2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = sinusoidal_pos_embds
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dim
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation
_UpperCAmelCase = initializer_range
_UpperCAmelCase = qa_dropout
_UpperCAmelCase = seq_classif_dropout
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 39
| 1
|
import random
from typing import Any
def __A ( __lowerCAmelCase )-> list[Any]:
"""simple docstring"""
for _ in range(len(__lowerCAmelCase ) ):
_UpperCAmelCase = random.randint(0 , len(__lowerCAmelCase ) - 1 )
_UpperCAmelCase = random.randint(0 , len(__lowerCAmelCase ) - 1 )
_UpperCAmelCase , _UpperCAmelCase = data[b], data[a]
return data
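# Note: the loop above swaps two independently chosen random positions, which is
# not the textbook Fisher-Yates algorithm and does not make every permutation
# equally likely. A minimal sketch of the canonical in-place version (the helper
# name is illustrative, not part of the original):
def _fisher_yates_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick a position in data[0..i], inclusive
        data[i], data[j] = data[j], data[i]
    return data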
if __name__ == "__main__":
_a = [0, 1, 2, 3, 4, 5, 6, 7]
_a = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 39
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
_a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {'source': 'What is love ?', 'target': 'life'}
_UpperCAmelCase = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(UpperCAmelCase , F"""{split}.{field}""" ) , 'w' ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = "pytorch" ):
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'output' )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
_UpperCAmelCase = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
_UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'metrics.json' )
with open(UpperCAmelCase ) as f:
_UpperCAmelCase = json.load(UpperCAmelCase )
return result
@require_torch_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 39
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase=False )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
_UpperCAmelCase = 'segformer.encoder.' + key
if key.startswith('backbone' ):
_UpperCAmelCase = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
            # replace, for example, patch_embed1 with patch_embeddings.0
_UpperCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
_UpperCAmelCase = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(__lowerCAmelCase )-1}""" )
if "norm" in key:
_UpperCAmelCase = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
            # replace, for example, layer_norm1 with layer_norm.0
_UpperCAmelCase = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
_UpperCAmelCase = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(__lowerCAmelCase )-1}""" )
if "layer_norm1" in key:
_UpperCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
_UpperCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
            # replace, for example, block1 with block.0
_UpperCAmelCase = key[key.find('block' ) + len('block' )]
_UpperCAmelCase = key.replace(F"""block{idx}""" , F"""block.{int(__lowerCAmelCase )-1}""" )
if "attn.q" in key:
_UpperCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
_UpperCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
_UpperCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
_UpperCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
_UpperCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
_UpperCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
_UpperCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
_UpperCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
            # replace, for example, linear_c4 with linear_c.3
_UpperCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
_UpperCAmelCase = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(__lowerCAmelCase )-1}""" )
if key.startswith('head' ):
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> List[str]:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_UpperCAmelCase = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase = kv_bias[
config.hidden_sizes[i] :
]
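    # (explanatory note, not in the original) the original checkpoint stores the
    # key and value projections as one fused matrix of shape (2 * hidden, hidden);
    # the slices above assign the first hidden_sizes[i] rows to the key and the
    # remaining rows to the value.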
def __A ( )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
_UpperCAmelCase = SegformerConfig()
_UpperCAmelCase = False
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
if "segformer" in model_name:
_UpperCAmelCase = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
_UpperCAmelCase = 150
_UpperCAmelCase = 'ade20k-id2label.json'
_UpperCAmelCase = (1, 150, 128, 128)
elif "city" in model_name:
_UpperCAmelCase = 19
_UpperCAmelCase = 'cityscapes-id2label.json'
_UpperCAmelCase = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
_UpperCAmelCase = True
_UpperCAmelCase = model_name[4:6]
_UpperCAmelCase = 1_000
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 256
elif size == "b2":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 4, 6, 3]
elif size == "b3":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 4, 18, 3]
elif size == "b4":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 8, 27, 3]
elif size == "b5":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__lowerCAmelCase , align=__lowerCAmelCase , do_random_crop=__lowerCAmelCase )
# prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location=torch.device('cpu' ) )
else:
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
_UpperCAmelCase = rename_keys(__lowerCAmelCase , encoder_only=__lowerCAmelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__lowerCAmelCase , __lowerCAmelCase )
# create HuggingFace model and load state dict
if encoder_only:
_UpperCAmelCase = False
_UpperCAmelCase = SegformerForImageClassification(__lowerCAmelCase )
else:
_UpperCAmelCase = SegformerForSemanticSegmentation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# forward pass
_UpperCAmelCase = model(__lowerCAmelCase )
_UpperCAmelCase = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_UpperCAmelCase = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_UpperCAmelCase = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
_UpperCAmelCase = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_a = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {} # Mapping from char to TrieNode
_UpperCAmelCase = False
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
_UpperCAmelCase = TrieNode()
_UpperCAmelCase = curr.nodes[char]
_UpperCAmelCase = True
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
_UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
def _delete(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool:
if index == len(UpperCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_UpperCAmelCase = False
return len(curr.nodes ) == 0
_UpperCAmelCase = word[index]
_UpperCAmelCase = curr.nodes.get(UpperCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_UpperCAmelCase = _delete(UpperCAmelCase , UpperCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
                return len(curr.nodes ) == 0 and not curr.is_leaf  # keep nodes that end a shorter word
return delete_curr
_delete(self , UpperCAmelCase , 0 )
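    # (explanatory note, not in the original) insert, find and delete each walk
    # one node per character, so all three run in O(len(word)) time; _delete
    # prunes nodes bottom-up only while they have no remaining children and do
    # not mark the end of another word.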
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
if node.is_leaf:
print(__lowerCAmelCase , end=' ' )
for key, value in node.nodes.items():
print_words(__lowerCAmelCase , word + key )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = TrieNode()
root.insert_many(__lowerCAmelCase )
# print_words(root, "")
assert all(root.find(__lowerCAmelCase ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
print(str(__lowerCAmelCase ) , 'works!' if passes else 'doesn\'t work :(' )
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 39
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 39
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ZeroShotClassificationPipeline(
            model=UpperCAmelCase , tokenizer=UpperCAmelCase , candidate_labels=['politics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# No kwarg
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(UpperCAmelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier(UpperCAmelCase , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=UpperCAmelCase , )
self.run_entailment_id(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = zero_shot_classifier.model.config
_UpperCAmelCase = config.labelaid
_UpperCAmelCase = zero_shot_classifier.entailment_id
_UpperCAmelCase = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_UpperCAmelCase = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_UpperCAmelCase = original_labelaid
self.assertEqual(UpperCAmelCase , zero_shot_classifier.entailment_id )
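        # (explanatory note, not in the original) entailment_id is derived from
        # the model's label2id mapping: it is the index of the first label whose
        # lower-cased name starts with "entail", and -1 when no label matches,
        # which is exactly what the four reassignments above exercise.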
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
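# Hedged aside (not part of the test suite): with multi_label=True the zero-shot
# pipeline is documented to score each candidate label independently, via a softmax
# over that label's entailment-vs-contradiction logits, which is why the scores
# above (0.817 + 0.713 + ...) need not sum to 1. A minimal sketch of that per-label
# normalization, with hypothetical logits:
import math
def _independent_label_score(entailment_logit , contradiction_logit ):
    e , c = math.exp(entailment_logit ) , math.exp(contradiction_logit )
    return e / (e + c )
# e.g. _independent_label_score(2.0 , 0.5 ) ~= 0.82, independently of the other labels.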
| 39
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected )-> None:
    """simple docstring"""
    out = _distribute_shards(**kwargs )
    assert out == expected
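# Hedged aside, not the library implementation: the parametrized cases above are
# consistent with `_distribute_shards` splitting `range(num_shards)` into at most
# `max_num_jobs` contiguous chunks, spreading the remainder over the first chunks.
# A minimal sketch under that assumption (`_distribute_shards_sketch` is our name):
def _distribute_shards_sketch(num_shards , max_num_jobs ):
    shards_per_job , remainder = divmod(num_shards , max_num_jobs )
    out , start = [] , 0
    for job in range(min(num_shards , max_num_jobs ) ):
        size = shards_per_job + (1 if job < remainder else 0 )
        out.append(range(start , start + size ) )
        start += size
    return out
assert _distribute_shards_sketch(10 , 3 ) == [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]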
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected )-> None:
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
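# Hedged companion sketch: the cases above suggest `_split_gen_kwargs` shards the
# list-valued kwarg ('shards') into contiguous chunks and repeats scalar kwargs in
# every job dict. A toy version reusing `_distribute_shards_sketch` from above:
def _split_gen_kwargs_sketch(gen_kwargs , max_num_jobs ):
    lists = {k: v for k, v in gen_kwargs.items() if isinstance(v , list )}
    if not lists:
        return [dict(gen_kwargs )]
    key , shards = next(iter(lists.items() ) ) # assumes a single shardable kwarg
    ranges = _distribute_shards_sketch(len(shards ) , max_num_jobs )
    return [{**gen_kwargs, key: [shards[i] for i in r]} for r in ranges]
assert _split_gen_kwargs_sketch({'shards': [0, 1, 2, 3]} , 2 ) == [{'shards': [0, 1]}, {'shards': [2, 3]}]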
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected )-> None:
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 39
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_a = get_logger(__name__)
class __lowerCamelCase ( enum.Enum):
"""simple docstring"""
UpperCamelCase__ = "all_checks"
UpperCamelCase__ = "basic_checks"
UpperCamelCase__ = "no_checks"
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def verify_checksums( expected_checksums , recorded_checksums , verification_name=None )-> None:
    """simple docstring"""
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
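# Hedged usage sketch for verify_checksums (made-up URLs and digests):
# expected = {'https://host/a.txt': {'num_bytes': 10, 'checksum': 'abc'}}
# recorded = {'https://host/a.txt': {'num_bytes': 10, 'checksum': 'abc'}}
# verify_checksums(expected , recorded , 'dummy config' ) # logs: all checksums matched
# A URL only in `expected` raises ExpectedMoreDownloadedFiles, one only in
# `recorded` raises UnexpectedDownloadedFile, and a mismatching entry raises
# NonMatchingChecksumError.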
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def verify_splits( expected_splits , recorded_splits )-> None:
    """simple docstring"""
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def get_size_checksum_dict( path , record_checksum = True )-> dict:
    """simple docstring"""
    if record_checksum:
        m = shaaaa()
        with open(path , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset( dataset_size )-> bool:
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
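# Usage sketch: with the in-memory cap configured (in `datasets` it comes from the
# HF_DATASETS_IN_MEMORY_MAX_SIZE environment variable), is_small_dataset(1024)
# is True only when 1024 < config.IN_MEMORY_MAX_SIZE; with the cap unset (0),
# it is always False.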
| 39
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = inspect.getfile(accelerate.test_utils )
_UpperCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_UpperCAmelCase = test_metrics
@require_cpu
def UpperCamelCase ( self ):
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase ( self ):
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
print(F"""Found {torch.cuda.device_count()} devices.""" )
_UpperCAmelCase = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
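# Hedged sketch, not accelerate's code: `patch_environment` above behaves like a
# context manager that upper-cases the given names, exports them for the child
# `torchrun` process, and restores the previous environment on exit. A minimal
# stand-in (`patch_environment_sketch` is our name):
import contextlib
@contextlib.contextmanager
def patch_environment_sketch(**kwargs ):
    previous = {k.upper(): os.environ.get(k.upper() ) for k in kwargs}
    os.environ.update({k.upper(): str(v ) for k, v in kwargs.items()} )
    try:
        yield
    finally:
        for k, v in previous.items():
            if v is None:
                os.environ.pop(k , None )
            else:
                os.environ[k] = v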
| 39
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2", "stage3"] , UpperCAmelCase=[1, 2, 3] , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCamelCase ( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                                'Tuple and dict output are not equal. Difference:'
                                F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                                F""" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has"""
                                F""" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}."""
                            ) , )
                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class __lowerCamelCase ( unittest.TestCase , snake_case__):
"""simple docstring"""
UpperCamelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase__ = MaskFormerSwinConfig
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(UpperCAmelCase )
backbone.to(UpperCAmelCase )
backbone.eval()
_UpperCAmelCase = backbone(**UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 39
| 1
|
def __A ( nums )-> float:
    """simple docstring"""
    if not nums: # Makes sure that the list is not empty
        raise ValueError('List is empty' )
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
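# Usage sketch for the mean-absolute-deviation helper above:
# __A([1, 2, 3, 4]) -> (|1 - 2.5| + |2 - 2.5| + |3 - 2.5| + |4 - 2.5|) / 4 = 1.0
# __A([]) raises ValueError('List is empty')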
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
        kwargs['lower_case'] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
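# Hedged aside: the expected tokens in the symbol test above ('5', '@,@', '000' and
# '3', '@.@', '34') follow the WikiText/Transformer-XL convention of splitting
# numbers around ',' and '.' and wrapping the separator in '@' so detokenization
# can re-join them. A toy regex sketch of that split (not the tokenizer's code):
import re
def _tokenize_numbers_sketch(text ):
    return re.sub(r'(?<=\d)([,.])(?=\d)' , r' @\1@ ' , text )
assert _tokenize_numbers_sketch('$5,000 with 3.34' ) == '$5 @,@ 000 with 3 @.@ 34'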
| 39
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = BlipImageProcessor()
_UpperCAmelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
_UpperCAmelCase = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCamelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
_UpperCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(UpperCAmelCase , return_tensors='np' )
_UpperCAmelCase = processor(images=UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=UpperCAmelCase )
_UpperCAmelCase = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(UpperCAmelCase )
_UpperCAmelCase = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 39
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = DistilBertTokenizer
UpperCamelCase__ = DistilBertTokenizerFast
UpperCamelCase__ = True
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 39
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_a = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy )-> None:
    """simple docstring"""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"""{len(dest_layers )} != {len(layers_to_copy )}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
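# Hedged toy usage of copy_layers (illustrative shapes, not from the repo):
# teacher_layers = nn.ModuleList([nn.Linear(4 , 4 ) for _ in range(12 )] )
# student_layers = nn.ModuleList([nn.Linear(4 , 4 ) for _ in range(3 )] )
# copy_layers(teacher_layers , student_layers , layers_to_copy=[0, 6, 11] )
# # student layer i now holds the weights of teacher layer [0, 6, 11][i]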
_a = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_a = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy( n_student , n_teacher )-> List[int]:
    """simple docstring"""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                F""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher )-> List[int]:
    """simple docstring"""
    if n_student > n_teacher:
        raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
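# Usage sketch for the two lookup helpers above:
# pick_layers_to_copy(4 , 12 ) -> [0, 4, 8, 11] (table hit: keeps first and last layers)
# pick_layers_to_copy(5 , 12 ) -> [0, 1, 2, 3, 4] (no table entry: warns, takes the first 5)
# get_layers_to_supervise(3 , 12 ) -> [3, 7, 11] (teacher layers supervising each student layer)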
def create_student_by_copying_alternating_layers( teacher , save_path = "student" , e = None , d = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , )-> Tuple[PreTrainedModel, List[int], List[int]]:
    """simple docstring"""
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher , str ):
        AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path ) # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher ).eval()
    else:
        assert isinstance(teacher , PreTrainedModel ), F"""teacher must be a model or string got type {type(teacher )}"""
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e , teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
    except AttributeError: # T5
        if hasattr(teacher.config , 'num_encoder_layers' ):
            teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config , 'num_encoder_layers' ):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
    if copy_first_teacher_layers: # Our copying is done. We just log and save
        e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
        logger.info(
            F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
            F""" {save_path}""" )
        student.save_pretrained(save_path )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e , teacher_e )
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d , teacher_d )
    try:
        if hasattr(
            teacher , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
    except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
        copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
    logger.info(
        F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 39
| 1
|
def is_ip_va_address_valid( ip_va_address )-> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_va_address_valid(ip ) else '''invalid'''
    print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 39
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False )-> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
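# Hedged toy check of the slicing convention above: timm stores one fused qkv
# weight of shape (3 * hidden_size, hidden_size); rows [0:h] are the query,
# rows [h:2h] the key, and rows [-h:] the value projection.
_h = 4
_fused = torch.arange(3 * _h * _h , dtype=torch.float32 ).reshape(3 * _h , _h )
_q , _k , _v = _fused[:_h, :] , _fused[_h : 2 * _h, :] , _fused[-_h:, :]
assert torch.equal(torch.cat([_q, _k, _v] ) , _fused )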
def remove_classification_head_( state_dict )-> None:
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new )-> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase , 'num_encoder_blocks' ) )
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=64 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[2, 2, 2, 2] , UpperCAmelCase=[8, 4, 2, 1] , UpperCAmelCase=[16, 32, 64, 128] , UpperCAmelCase=[1, 4, 8, 16] , UpperCAmelCase=[1, 2, 4, 8] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_encoder_blocks
_UpperCAmelCase = sr_ratios
_UpperCAmelCase = depths
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = downsampling_rates
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
def UpperCamelCase ( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = SegformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = SegformerForSemanticSegmentation(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 1
_UpperCAmelCase = SegformerForSemanticSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SegformerModelTester(self )
_UpperCAmelCase = SegformerConfigTester(self , config_class=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase )
@unittest.skip('SegFormer does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_UpperCAmelCase = len(UpperCAmelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(UpperCAmelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_encoder_blocks
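# one (progressively downsampled) hidden state is returned per encoder block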
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase ):
continue
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = model(**UpperCAmelCase ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SegformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> int:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase )
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
UpperCAmelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='pt' )
_UpperCAmelCase = encoded_inputs.pixel_values.to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase )
_UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase )
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(UpperCAmelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='pt' )
_UpperCAmelCase = encoded_inputs.pixel_values.to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase )
_UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase , atol=1e-1 ) )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase )
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
UpperCAmelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='pt' )
_UpperCAmelCase = encoded_inputs.pixel_values.to(UpperCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(UpperCAmelCase )
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase , target_sizes=[(500, 300)] )
_UpperCAmelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase )
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase )
_UpperCAmelCase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase )
| 39
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNorm1d(4 )

_UpperCAmelCase = nn.Linear(4 , 5 )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
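# find_executable_batch_size retries the wrapped function, halving batch_size
# after every (simulated) CUDA OOM until the call succeeds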
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_UpperCAmelCase , _UpperCAmelCase = mock_training_loop_function('hello' )
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase ):
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.cuda.memory_allocated()
_UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase )
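# release_memory drops the reference(s) it is given and empties the CUDA
# cache, so allocated memory should fall back to the baseline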
_UpperCAmelCase = release_memory(UpperCAmelCase )
self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase )
| 39
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
_a = logging.getLogger(__name__)
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
_a = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
_a = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
_a = Counter()
for tk_ids in data:
counter.update(tk_ids)
_a = [0] * args.vocab_size
for k, v in counter.items():
_a = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 39
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
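# the embedding output is included as well, hence num_stages + 1 hidden states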
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39
| 1
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a = '''__DUMMY_TRANSFORMERS_USER__'''
_a = '''Dummy User'''
_a = '''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'''
_a = '''https://hub-ci.huggingface.co'''
_a = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_a = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_a = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , __lowerCAmelCase )
@pytest.fixture
def __A ( __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , __lowerCAmelCase )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , __lowerCAmelCase )
@pytest.fixture
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , __lowerCAmelCase )
@pytest.fixture
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
"""simple docstring"""
HfFolder.save_token(__lowerCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def __A ( )-> Tuple:
"""simple docstring"""
return HfApi(endpoint=__lowerCAmelCase )
@pytest.fixture(scope='session' )
def __A ( __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = HfFolder.get_token()
HfFolder.save_token(__lowerCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__lowerCAmelCase )
@pytest.fixture
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
def _cleanup_repo(__lowerCAmelCase ):
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def __A ( __lowerCAmelCase )-> Union[str, Any]:
"""simple docstring"""
@contextmanager
def _temporary_repo(__lowerCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__lowerCAmelCase )
return _temporary_repo
@pytest.fixture(scope='session' )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict:
"""simple docstring"""
_UpperCAmelCase = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
_UpperCAmelCase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo='data/text_data.txt' , repo_id=__lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
_UpperCAmelCase = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
_UpperCAmelCase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo='data.zip' , repo_id=__lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[Any]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict:
"""simple docstring"""
_UpperCAmelCase = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
_UpperCAmelCase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' , private=__lowerCAmelCase )
hf_api.upload_file(
token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo='data.zip' , repo_id=__lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 39
|
def __A ( __lowerCAmelCase )-> list:
"""simple docstring"""
if len(__lowerCAmelCase ) < 2:
return collection
def circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
_UpperCAmelCase = False
if low == high:
return swapped
_UpperCAmelCase = low
_UpperCAmelCase = high
while left < right:
if collection[left] > collection[right]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right],
collection[left],
)
_UpperCAmelCase = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right + 1],
collection[left],
)
_UpperCAmelCase = True
_UpperCAmelCase = low + int((high - low) / 2 )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
return swapped or left_swap or right_swap
_UpperCAmelCase = True
while is_not_sorted is True:
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) - 1 )
return collection
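# Example: circle_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]; mirrored pairs are
# compared and swapped, both halves are recursed into, and full passes repeat
# until one completes with no swap.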
if __name__ == "__main__":
_a = input('''Enter numbers separated by a comma:\n''').strip()
_a = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 39
| 1
|
import operator as op
_a = '''scaler.pt'''
_a = '''pytorch_model'''
_a = '''random_states'''
_a = '''optimizer'''
_a = '''scheduler'''
_a = '''pytorch_model.bin'''
_a = '''pytorch_model.bin.index.json'''
_a = '''model.safetensors'''
_a = '''model.safetensors.index.json'''
_a = '''1.10.2'''
_a = '''py38'''
_a = '''4.17.0'''
_a = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
_a = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
_a = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
_a = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
_a = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
_a = '''2.0.1'''
_a = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
_a = ['''default''', '''reduce-overhead''', '''max-autotune''']
_a = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_a = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
_a = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
_a = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 39
| 1
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __A ( __lowerCAmelCase=None , __lowerCAmelCase=None )-> Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
UpperCamelCase__ = field(
metadata={"help": "The csv file to plot."} , )
UpperCamelCase__ = field(
default=snake_case__ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
UpperCamelCase__ = field(
default=snake_case__ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
UpperCamelCase__ = field(
default=snake_case__ , metadata={"help": "Disable logarithmic scale when plotting"} , )
UpperCamelCase__ = field(
default=snake_case__ , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
UpperCamelCase__ = field(
default=snake_case__ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
UpperCamelCase__ = list_field(
default=snake_case__ , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def __A ( __lowerCAmelCase )-> List[str]:
"""simple docstring"""
try:
int(__lowerCAmelCase )
return True
except ValueError:
return False
def __A ( __lowerCAmelCase )-> Optional[int]:
"""simple docstring"""
try:
float(__lowerCAmelCase )
return True
except ValueError:
return False
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = args
_UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
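# per model we track the observed batch sizes and sequence lengths plus a
# result mapping, keyed by (batch_size, sequence_length) where it is read below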
with open(self.args.csv_file , newline='' ) as csv_file:
_UpperCAmelCase = csv.DictReader(UpperCAmelCase )
for row in reader:
_UpperCAmelCase = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
_UpperCAmelCase = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
_UpperCAmelCase = float(row['result'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = plt.subplots()
_UpperCAmelCase = 'Time usage' if self.args.is_time else 'Memory usage'
_UpperCAmelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCAmelCase = sorted(set(self.result_dict[model_name]['bsz'] ) )
_UpperCAmelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) )
_UpperCAmelCase = self.result_dict[model_name]['result']
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCAmelCase , )
else:
_UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((_UpperCAmelCase) , (_UpperCAmelCase)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
_UpperCAmelCase = np.asarray(UpperCAmelCase , UpperCAmelCase )[: len(UpperCAmelCase )]
plt.scatter(
UpperCAmelCase , UpperCAmelCase , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(UpperCAmelCase , UpperCAmelCase , '--' )
title_str += F""" {label_model_name} vs."""
_UpperCAmelCase = title_str[:-4]
_UpperCAmelCase = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(UpperCAmelCase )
plt.xlabel(UpperCAmelCase )
plt.ylabel(UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __A ( )-> List[Any]:
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = Plot(args=__lowerCAmelCase )
plot.plot()
if __name__ == "__main__":
main()
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase = "" , UpperCAmelCase = False ):
"""simple docstring"""
_UpperCAmelCase = {}
# A node will be a leaf if the tree contains its word
_UpperCAmelCase = is_leaf
_UpperCAmelCase = prefix
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 0
for q, w in zip(self.prefix , UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.prefix == word:
_UpperCAmelCase = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_UpperCAmelCase = RadixNode(prefix=UpperCAmelCase , is_leaf=UpperCAmelCase )
else:
_UpperCAmelCase = self.nodes[word[0]]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_UpperCAmelCase = remaining_prefix
_UpperCAmelCase = self.nodes[matching_string[0]]
_UpperCAmelCase = RadixNode(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = aux_node
if remaining_word == "":
_UpperCAmelCase = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.nodes.get(word[0] , UpperCAmelCase )
if not incoming_node:
return False
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = incoming_node.match(
UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_UpperCAmelCase = list(self.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
self.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_UpperCAmelCase = False
# If there is 1 edge, we merge it with its child
else:
_UpperCAmelCase = list(incoming_node.nodes.values() )[0]
_UpperCAmelCase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_UpperCAmelCase = merging_node.nodes
return True
def UpperCamelCase ( self , UpperCAmelCase = 0 ):
"""simple docstring"""
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = RadixNode()
root.insert_many(__lowerCAmelCase )
assert all(root.find(__lowerCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
_UpperCAmelCase = RadixNode()
_UpperCAmelCase = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(__lowerCAmelCase )
print('Words:' , __lowerCAmelCase )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
| 39
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=UpperCAmelCase , )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_a = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , *, # begin keyword-only arguments
UpperCAmelCase="<s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = len(self.symbols )
def __eq__( self , UpperCAmelCase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , UpperCAmelCase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = cls()
d.add_from_file(UpperCAmelCase )
return d
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(UpperCAmelCase )
self.count.append(UpperCAmelCase )
return idx
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return 0
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(UpperCAmelCase ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(' ' , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(UpperCAmelCase )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(UpperCAmelCase ) )
self.add_symbol(UpperCAmelCase , n=UpperCAmelCase , overwrite=UpperCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
_UpperCAmelCase = dict((re.sub(R'@@$' , '' , __lowerCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , __lowerCAmelCase ), v) for k, v in d.items() )
_UpperCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
_UpperCAmelCase = d[k] # restore
return da
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> str:
"""simple docstring"""
if not os.path.exists(__lowerCAmelCase ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'checkpoint.pt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location='cpu' )
_UpperCAmelCase = chkpt['cfg']['model']
# dicts
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'dict.txt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
_UpperCAmelCase = Dictionary.load(__lowerCAmelCase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'bpecodes' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'config.json' )
_UpperCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
_UpperCAmelCase = chkpt['model']
# remove unneeded keys
_UpperCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
else:
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('Conversion is done!' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 39
| 1
|
# flake8: noqa
# Lint as: python3
_a = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 39
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
return "".join(sorted(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase )-> list[str]:
"""simple docstring"""
return word_by_signature[signature(__lowerCAmelCase )]
_a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
_a = sorted({word.strip().lower() for word in data.splitlines()})
_a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 39
| 1
|
from __future__ import annotations
def __A ( __lowerCAmelCase )-> list[int]:
"""simple docstring"""
_UpperCAmelCase = 2
_UpperCAmelCase = []
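# trial division: divide out each factor i up to sqrt(n); any leftover n > 1
# is itself prime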
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCAmelCase )
if n > 1:
factors.append(__lowerCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
|
from __future__ import annotations
def __A ( __lowerCAmelCase )-> list[int]:
"""simple docstring"""
_UpperCAmelCase = 2
_UpperCAmelCase = []
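# trial division: divide out each factor i up to sqrt(n); any leftover n > 1
# is itself prime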
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCAmelCase )
if n > 1:
factors.append(__lowerCAmelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = '<unk> UNwanted , running'
_UpperCAmelCase = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(UpperCAmelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
_UpperCAmelCase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
_UpperCAmelCase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = len(UpperCAmelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
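# The tokenization test above exercises Transfo-XL's WikiText-style markup:
# '5,000' becomes ['5', '@,@', '000'] and 'side-scrolled' becomes
# ['side', '@-@', 'scrolled']. A self-contained sketch of undoing that markup
# (the helper below is my own, not part of the tokenizer's API):
import re

def detokenize_wikitext(tokens: list[str]) -> str:
    text = ' '.join(tokens)
    # Collapse ' @x@ ' markers back into the literal separator x.
    return re.sub(r' @(.)@ ', r'\1', text)

assert detokenize_wikitext(['5', '@,@', '000']) == '5,000'
assert detokenize_wikitext(['side', '@-@', 'scrolled']) == 'side-scrolled'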
| 39
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __A ( )-> tuple[list[int], int]:
"""simple docstring"""
_UpperCAmelCase = [randint(-1_000 , 1_000 ) for i in range(10 )]
_UpperCAmelCase = randint(-5_000 , 5_000 )
return (arr, r)
_a = make_dataset()
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(__lowerCAmelCase , 3 ):
if sum(__lowerCAmelCase ) == target:
return tuple(sorted(__lowerCAmelCase ) )
return (0, 0, 0)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
_UpperCAmelCase = len(__lowerCAmelCase )
for i in range(n - 1 ):
_UpperCAmelCase , _UpperCAmelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __A ( )-> tuple[float, float]:
"""simple docstring"""
_UpperCAmelCase = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
_UpperCAmelCase = '\ntriplet_sum1(*dataset)\n'
_UpperCAmelCase = '\ntriplet_sum2(*dataset)\n'
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
_UpperCAmelCase = repeat(setup=__lowerCAmelCase , stmt=__lowerCAmelCase , repeat=5 , number=10_000 )
return (min(__lowerCAmelCase ), min(__lowerCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
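# Worked example of the two-pointer idea benchmarked above (the input array
# is my own, not from the source): after sorting, the scan fixes arr[i] and
# walks left/right inward, trading the O(n^3) permutation search for O(n^2).
def two_pointer_triplet(arr: list[int], target: int) -> tuple[int, int, int]:
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            total = arr[i] + arr[left] + arr[right]
            if total == target:
                return (arr[i], arr[left], arr[right])
            if total < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)

assert two_pointer_triplet([-8, -5, 1, 3, 7, 12], 10) == (-5, 3, 12)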
| 39
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '''▁'''
_a = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_a = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
_a = {
'''facebook/xglm-564M''': 2048,
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase ))
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase ))
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ''.join(UpperCAmelCase ).replace(UpperCAmelCase , ' ' ).strip()
return out_string
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
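# The token-to-id plumbing above exists because the fairseq vocab and the
# SentencePiece vocab are shifted by one: spm id 0 is '<unk>' while fairseq
# reserves ids 0-3 for '<s>', '<pad>', '</s>', '<unk>'. A toy sketch of that
# alignment, with plain dicts standing in for the real sp_model:
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
fairseq_offset = 1  # shift every "real" spm piece up by one

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm returns 0 for unknown pieces; map that to the fairseq unk id.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

assert spm_id_to_fairseq_id(3) == 4  # first "real" piece ',': spm 3 -> fairseq 4
assert spm_id_to_fairseq_id(0) == 3  # unknown piece -> '<unk>'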
| 39
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
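# Every test above follows the same pmap data layout: replicate the params to
# each device and reshape the batch so its leading axis equals the local
# device count. A minimal sketch of just that layout step, on a toy array
# with no pipeline involved (the 4-rows-per-device split is my own choice):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.local_device_count()
params = replicate({'w': jnp.ones((3,))})  # leaf shape becomes (num_devices, 3)
batch = jnp.zeros((num_devices * 4, 77), dtype=jnp.int32)  # flat prompt ids
sharded = shard(batch)  # -> (num_devices, 4, 77), one slice per device
assert sharded.shape == (num_devices, 4, 77)
assert params['w'].shape == (num_devices, 3)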
| 39
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
_a = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_a = spec.loader.load_module()
_a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_a = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
_a = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def __A ( )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(__lowerCAmelCase )
_UpperCAmelCase = _re_checkpoint.findall(__lowerCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_UpperCAmelCase = '\n'.join(sorted(__lowerCAmelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
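# What the checkpoint regex above captures, shown on a made-up docstring
# fragment (the fragment is mine; the pattern mirrors the script's):
import re

_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
doc = 'Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)'
assert _re_checkpoint.findall(doc) == [
    ('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')
]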
| 39
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
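# The sequence-builder assertions above reduce to one assembly rule:
# single sequence -> [CLS] ids [SEP]; pair -> [CLS] ids_a [SEP] ids_b [SEP].
# A toy sketch with made-up token ids (2 = [CLS], 3 = [SEP], matching the
# expected_encoding rows above):
def build_inputs(ids_a: list[int], ids_b=None) -> list[int]:
    cls_id, sep_id = 2, 3
    out = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        out += ids_b + [sep_id]
    return out

assert build_inputs([10, 11]) == [2, 10, 11, 3]
assert build_inputs([10, 11], [20]) == [2, 10, 11, 3, 20, 3]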
| 39
| 1
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["pixel_values"]
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = 8 , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
_UpperCAmelCase = pad_size
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = get_image_size(UpperCAmelCase )
_UpperCAmelCase = (old_height // size + 1) * size - old_height
_UpperCAmelCase = (old_width // size + 1) * size - old_width
return pad(UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_pad if do_pad is not None else self.do_pad
_UpperCAmelCase = pad_size if pad_size is not None else self.pad_size
_UpperCAmelCase = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_pad:
_UpperCAmelCase = [self.pad(UpperCAmelCase , size=UpperCAmelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
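# The pad step above grows each spatial dim to the next multiple of pad_size
# that is strictly larger than the current size - note the `+ 1`, so an
# already-aligned 64 with size 8 still gains a full stripe of 8. A tiny
# sketch of just that arithmetic:
def pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old

assert pad_amount(61, 8) == 3  # 61 -> 64
assert pad_amount(64, 8) == 8  # 64 -> 72, never zero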
| 39
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exist
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
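# save_pretrained above round-trips each voice preset through per-key .npy
# files plus a json index. A self-contained sketch of that on-disk layout
# (paths and key names mirror the snippet; the arrays are stand-in zeros):
import json
import os
import tempfile

import numpy as np

with tempfile.TemporaryDirectory() as repo:
    os.makedirs(os.path.join(repo, 'speaker_embeddings', 'v2'), exist_ok=True)
    preset = {'semantic_prompt': np.zeros(4), 'coarse_prompt': np.zeros((2, 4))}
    index = {}
    for key, arr in preset.items():
        rel = os.path.join('speaker_embeddings', 'v2', f'demo_{key}.npy')
        np.save(os.path.join(repo, rel), arr)
        index[key] = rel  # the json maps preset keys to relative .npy paths
    with open(os.path.join(repo, 'speaker_embeddings_path.json'), 'w') as fp:
        json.dump({'demo': index}, fp)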
| 39
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "detr"
UpperCamelCase__ = ["past_key_values"]
UpperCamelCase__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=3 , UpperCAmelCase=100 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=2048 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase="resnet50" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , **UpperCAmelCase , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_UpperCAmelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = backbone_config.get('model_type' )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None, None, None
_UpperCAmelCase = use_timm_backbone
_UpperCAmelCase = backbone_config
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_queries
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = init_xavier_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = auxiliary_loss
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = backbone
_UpperCAmelCase = use_pretrained_backbone
_UpperCAmelCase = dilation
# Hungarian matcher
_UpperCAmelCase = class_cost
_UpperCAmelCase = bbox_cost
_UpperCAmelCase = giou_cost
# Loss coefficients
_UpperCAmelCase = mask_loss_coefficient
_UpperCAmelCase = dice_loss_coefficient
_UpperCAmelCase = bbox_loss_coefficient
_UpperCAmelCase = giou_loss_coefficient
_UpperCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return cls(backbone_config=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = version.parse("1.11")
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return 1e-5
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return 12
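# The config above is transformers' DetrConfig under a transformed class
# name. A hedged usage sketch against the real library, showing why the
# attribute map matters (`hidden_size` is an alias for `d_model`):
from transformers import DetrConfig

config = DetrConfig(num_queries=50, d_model=256)
assert config.hidden_size == 256  # resolved through attribute_map to d_model
assert config.num_attention_heads == 8  # alias for encoder_attention_heads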
| 39
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=512 , UpperCAmelCase=False , UpperCAmelCase=6 , UpperCAmelCase=12 , UpperCAmelCase=768 , UpperCAmelCase=4 * 768 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=0.1 , UpperCAmelCase=0.2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = sinusoidal_pos_embds
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dim
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation
_UpperCAmelCase = initializer_range
_UpperCAmelCase = qa_dropout
_UpperCAmelCase = seq_classif_dropout
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 39
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "lilt"
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=None , UpperCAmelCase=4 , UpperCAmelCase=1024 , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = channel_shrink_ratio
_UpperCAmelCase = max_ad_position_embeddings
| 39
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
_a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {'source': 'What is love ?', 'target': 'life'}
_UpperCAmelCase = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(UpperCAmelCase , F"""{split}.{field}""" ) , 'w' ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = "pytorch" ):
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'output' )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
_UpperCAmelCase = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
_UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'metrics.json' )
with open(UpperCAmelCase ) as f:
_UpperCAmelCase = json.load(UpperCAmelCase )
return result
@require_torch_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 39
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , )
return model
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
_UpperCAmelCase = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCAmelCase = DDPMScheduler()
_UpperCAmelCase = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
_UpperCAmelCase = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=UpperCAmelCase , steps=4 )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
_UpperCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
_UpperCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10]
_UpperCAmelCase = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vqvae_and_unet
_UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
_UpperCAmelCase = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
_UpperCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
_UpperCAmelCase = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
_UpperCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
_UpperCAmelCase = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCAmelCase = self.dummy_unet_condition
_UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
_UpperCAmelCase = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
_UpperCAmelCase = torch.rand((1, 1, 10) )
_UpperCAmelCase = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
_UpperCAmelCase = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch_device
_UpperCAmelCase = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
_UpperCAmelCase = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
_UpperCAmelCase = pipe(generator=UpperCAmelCase )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:10]
_UpperCAmelCase = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 39
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {} # Mapping from char to TrieNode
_UpperCAmelCase = False
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
_UpperCAmelCase = TrieNode()
_UpperCAmelCase = curr.nodes[char]
_UpperCAmelCase = True
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
_UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
def _delete(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool:
if index == len(UpperCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_UpperCAmelCase = False
return len(curr.nodes ) == 0
_UpperCAmelCase = word[index]
_UpperCAmelCase = curr.nodes.get(UpperCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_UpperCAmelCase = _delete(UpperCAmelCase , UpperCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCAmelCase , 0 )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
if node.is_leaf:
print(__lowerCAmelCase , end=' ' )
for key, value in node.nodes.items():
print_words(__lowerCAmelCase , word + key )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = TrieNode()
root.insert_many(__lowerCAmelCase )
# print_words(root, "")
assert all(root.find(__lowerCAmelCase ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
print(str(__lowerCAmelCase ) , 'works!' if passes else 'doesn\'t work :(' )
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
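# A readable sketch of the same trie (identifiers are my own; the transformed
# version above keeps children in `nodes` and marks word ends with a flag):
class SimpleTrieNode:
    def __init__(self) -> None:
        self.children: dict[str, 'SimpleTrieNode'] = {}
        self.is_word = False

    def insert(self, word: str) -> None:
        node = self
        for ch in word:
            node = node.children.setdefault(ch, SimpleTrieNode())
        node.is_word = True

    def find(self, word: str) -> bool:
        node = self
        for ch in word:
            if ch not in node.children:
                return False
            node = node.children[ch]
        return node.is_word

demo_root = SimpleTrieNode()
demo_root.insert('banana')
assert demo_root.find('banana') and not demo_root.find('ban')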
| 39
| 1
|
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc spanning `angle` degrees on a circle of `radius`."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
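# A quick sanity sketch for arc_length: a 360-degree "arc" must equal the full
# circumference, and 90 degrees a quarter of it.
from math import isclose

assert isclose(arc_length(360, 10), 2 * pi * 10)
assert isclose(arc_length(90, 10), 2 * pi * 10 / 4)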
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY

# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
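# A minimal standalone sketch of the task the tests above cover, assuming the
# public `facebook/bart-large-mnli` checkpoint (a common zero-shot NLI model);
# the helper name is illustrative.
def classify_example():
    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
        hypothesis_template="This text is about {}.",
    )
    # `result` is a dict with "sequence", "labels" (sorted by score), "scores".
    return result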
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
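# An equivalent, more direct sketch: len(str(...)) replaces the inner counting
# loop. It agrees with solution() for every n >= 2 (n == 1 is degenerate for
# both versions, since the very first Fibonacci term already has one digit).
def solution_compact(n: int = 1_000) -> int:
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index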
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
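# A minimal sketch of how the helpers above fit together: record size/checksum
# for local files, then verify them against an expected table. The helper name
# and the url->path mapping are illustrative, not part of this module.
def check_downloads(expected: dict, paths: dict) -> None:
    recorded = {url: get_size_checksum_dict(path) for url, path in paths.items()}
    # Raises ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile /
    # NonMatchingChecksumError on any mismatch, otherwise just logs success.
    verify_checksums(expected, recorded, verification_name="dataset source files")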
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Check that the conversion to BetterTransformer and back is lossless."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise while in BetterTransformer mode and work after reversing."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
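# The pattern the tests above exercise, as a plain sketch. Requires the
# `optimum` package; the model id is reused from the tests and the output
# directory name is illustrative.
def bettertransformer_roundtrip():
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()        # swap in fused attention modules
    out = model.generate(**tok("This is me", return_tensors="pt"))
    model = model.reverse_bettertransformer()   # restore vanilla modules before saving
    model.save_pretrained("t5-tiny-roundtrip")
    return out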
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
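# A small sketch of using the backbone directly, mirroring the tester's tiny
# config above; the helper name is illustrative and the shapes follow
# create_and_check_backbone.
def run_tiny_backbone():
    config = MaskFormerSwinConfig(
        image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        out_features=["stage1", "stage2", "stage3"],
    )
    backbone = MaskFormerSwinBackbone(config).eval()
    pixel_values = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        outputs = backbone(pixel_values)
    # One (batch, channels, height, width) feature map per requested stage.
    return [fm.shape for fm in outputs.feature_maps]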
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher by picking the shift whose decryption minimizes
    the chi-squared statistic against English letter frequencies."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
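# A quick usage sketch: shift a known sentence forward by 3 and let the
# chi-squared scorer recover the shift and plaintext. The demo helper is
# illustrative, not part of the original module.
def _demo() -> None:
    alphabet = [chr(i) for i in range(97, 123)]
    plaintext = "defend the east wall of the castle"
    ciphertext = "".join(
        alphabet[(alphabet.index(c) + 3) % 26] if c in alphabet else c for c in plaintext
    )
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(ciphertext)
    print(shift, round(chi_squared, 3), decoded)  # expected shift: 3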
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
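# The less common API exercised by the last test, as a small sketch: the
# TransfoXL tokenizer lets you relocate a freshly added token to a specific
# vocabulary id, which the generic tokenizer API does not expose. The helper
# and the "<special>" token are illustrative.
def relocate_token(tokenizer: TransfoXLTokenizer) -> None:
    tokenizer.add_tokens(["<special>"])
    tokenizer.move_added_token("<special>", 1)  # now encodes/decodes as id 1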
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
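# The batching recipe the last test validates, reduced to its essentials as a
# sketch: decoder-only models must be left-padded so every sequence ends at
# the same position before generation. The helper name is illustrative.
def batched_generate(model, tokenizer, sentences, max_new_tokens=12):
    tokenizer.padding_side = "left"  # crucial for decoder-only generation
    inputs = tokenizer(sentences, return_tensors="tf", padding=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=max_new_tokens,
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)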
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
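# The lazy-import trick above, reduced to its standard-library core as a
# sketch: a PEP 562 module-level __getattr__ defers the heavy import until an
# attribute is first touched. This is illustrative only, not transformers'
# actual _LazyModule implementation, and the attribute table is hypothetical.
import importlib

_LAZY_ATTRS = {"OPTConfig": ".configuration_opt"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")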
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
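# What a dummy object like the one above buys you, sketched with a minimal
# self-contained reimplementation (illustrative, not the real DummyObject):
# touching any public attribute raises a helpful ImportError instead of a
# NameError at import time.
class _DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the keras_nlp library.")

class _TFGPT2TokenizerStub(metaclass=_DummyObject):
    pass

# _TFGPT2TokenizerStub.from_pretrained("gpt2") -> ImportError with a clear message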
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
_UpperCAmelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F"""{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_a = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_a = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> Dict:
"""simple docstring"""
try:
_UpperCAmelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __A ( __lowerCAmelCase , __lowerCAmelCase = "student" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , )-> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
_UpperCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}"""
_UpperCAmelCase = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
_UpperCAmelCase = teacher.config_class(**__lowerCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_UpperCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
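# Usage sketch (hypothetical checkpoint name and sizes):
#   create_student_by_copying_alternating_layers("facebook/bart-large-cnn", "student_dir", e=12, d=3)
# returns the smaller model together with the encoder/decoder layer indices that were copied.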
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 39
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=0 , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = projection_dim
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
_UpperCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRContextEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRQuestionEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRReader(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFDPRModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRContextEncoder.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFDPRReader.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_UpperCAmelCase = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_UpperCAmelCase = model(UpperCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCAmelCase = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 39
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase=False )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
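# For example, the pairs above send "blocks.0.norm1.weight" to
# "vit.encoder.layer.0.layernorm_before.weight" (the leading "vit." is stripped again
# when base_model is set).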
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = ''
else:
_UpperCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
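# Shape sketch: for hidden size H the fused qkv weight is (3H, H); the slices above route
# rows [0:H], [H:2H] and [2H:3H] (plus the matching bias segments) to the query, key and
# value projections respectively.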
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
_UpperCAmelCase = dct.pop(__lowerCAmelCase )
_UpperCAmelCase = val
def __A ( )-> str:
"""simple docstring"""
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCAmelCase = 8
# set labels if required
if not base_model:
_UpperCAmelCase = 1_000
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCAmelCase = 384
_UpperCAmelCase = 1_536
_UpperCAmelCase = 12
_UpperCAmelCase = 6
# load original model from torch hub
_UpperCAmelCase = torch.hub.load('facebookresearch/dino:main' , __lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
_UpperCAmelCase = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if base_model:
_UpperCAmelCase = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase ).eval()
else:
_UpperCAmelCase = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCAmelCase = ViTImageProcessor()
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_UpperCAmelCase = encoding['pixel_values']
_UpperCAmelCase = model(__lowerCAmelCase )
if base_model:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39
| 1
|
import os
def __A ( __lowerCAmelCase = "input.txt" )-> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file:
_UpperCAmelCase = [
[int(__lowerCAmelCase ) for element in line.split(',' )]
for line in input_file.readlines()
]
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = len(matrix[0] )
_UpperCAmelCase = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = matrix[i][0]
for j in range(1 , __lowerCAmelCase ):
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCAmelCase ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
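# Worked example (illustrative): for the matrix [[1, 9], [5, 2]] the forward pass yields
# column sums [10, 7]; the downward/upward relaxations change nothing here, so the minimal
# left-to-right path cost is min(10, 7) = 7 (the path 5 -> 2).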
if __name__ == "__main__":
print(F'''{solution() = }''')
| 39
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class __lowerCamelCase ( nn.Module):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNormad(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase ):
nonlocal batch_sizes
batch_sizes.append(UpperCAmelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_UpperCAmelCase , _UpperCAmelCase = mock_training_loop_function('hello' )
self.assertListEqual(UpperCAmelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase ):
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(UpperCAmelCase ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = torch.cuda.memory_allocated()
_UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCAmelCase )
_UpperCAmelCase = release_memory(UpperCAmelCase )
self.assertEqual(torch.cuda.memory_allocated() , UpperCAmelCase )
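# Behavior sketch (mirroring the tests above): find_executable_batch_size halves the
# injected batch size after every CUDA OOM, so a function that only fits at 8 is retried
# with 128, 64, 32, 16 and finally succeeds at 8; reaching zero raises a RuntimeError instead.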
| 39
| 1
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
                logger.warning(
                    F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
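# Usage sketch (class and checkpoint names assumed, not defined in this file):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")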
| 39
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = StableDiffusionInpaintPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase__ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ = frozenset([])
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=UpperCAmelCase )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(UpperCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_UpperCAmelCase = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInpaintPipeline(**UpperCAmelCase )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase )
_UpperCAmelCase = sd_pipe(**UpperCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_UpperCAmelCase = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_UpperCAmelCase = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=UpperCAmelCase , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
_UpperCAmelCase = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCAmelCase = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCAmelCase = PNDMScheduler.from_pretrained(UpperCAmelCase , subfolder='scheduler' )
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase , safety_checker=UpperCAmelCase , scheduler=UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 39
|
def __A ( __lowerCAmelCase )-> list:
"""simple docstring"""
if len(__lowerCAmelCase ) < 2:
return collection
def circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
_UpperCAmelCase = False
if low == high:
return swapped
_UpperCAmelCase = low
_UpperCAmelCase = high
while left < right:
if collection[left] > collection[right]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right],
collection[left],
)
_UpperCAmelCase = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_UpperCAmelCase , _UpperCAmelCase = (
collection[right + 1],
collection[left],
)
_UpperCAmelCase = True
_UpperCAmelCase = low + int((high - low) / 2 )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , mid + 1 , __lowerCAmelCase )
return swapped or left_swap or right_swap
_UpperCAmelCase = True
while is_not_sorted is True:
_UpperCAmelCase = circle_sort_util(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) - 1 )
return collection
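# Quick sanity check (illustrative): circle_sort([3, 1, 2]) -> [1, 2, 3]; inputs with
# fewer than two elements are returned unchanged by the early exit above.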
if __name__ == "__main__":
_a = input('''Enter numbers separated by a comma:\n''').strip()
_a = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 39
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
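# Note (assumed _LazyModule semantics): the torch/TF symbols declared above are only
# imported on first attribute access, keeping the top-level package import lightweight.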
| 39
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
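# Usage sketch (illustrative): processor(images=image, text="A stop sign") merges the image
# processor's flattened patches with the tokenizer output, whose ids/mask are re-keyed for
# the decoder as handled in __call__ above.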
| 39
| 1
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def __A ( __lowerCAmelCase , __lowerCAmelCase=0.9_99 , __lowerCAmelCase="cosine" , )-> Union[str, Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCAmelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCAmelCase ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
_UpperCAmelCase = []
for i in range(__lowerCAmelCase ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) )
return torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
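# Schedule sketch (cosine transform above): beta_i = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T),
# max_beta) with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, so the betas grow
# toward the end of the diffusion process.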
class __lowerCamelCase ( snake_case__ , snake_case__):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = 0.00_01 , UpperCAmelCase = 0.02 , UpperCAmelCase = "linear" , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = 0 , UpperCAmelCase = "epsilon" , UpperCAmelCase = 1.0 , **UpperCAmelCase , ):
"""simple docstring"""
if kwargs.get('set_alpha_to_one' , UpperCAmelCase ) is not None:
_UpperCAmelCase = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , UpperCAmelCase , standard_warn=UpperCAmelCase )
_UpperCAmelCase = kwargs['set_alpha_to_one']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(UpperCAmelCase )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , UpperCAmelCase ).copy().astype(np.intaa ) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
return sample
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"""
                F""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
                F""" a maximum of {self.config.num_train_timesteps} timesteps.""" )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
_UpperCAmelCase = (np.arange(0 , UpperCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
self.timesteps += self.config.steps_offset
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = True , ):
"""simple docstring"""
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
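# A minimal standalone sketch (not part of the scheduler class above) of the
# closed-form update in steps 3-6: recover x_0 from an epsilon prediction and
# move it to the next noise level. `toy_inverse_step` and its argument names
# are hypothetical, used only for illustration; it works on plain floats or
# torch tensors alike.
def toy_inverse_step(sample, eps_hat, alpha_prod_t, alpha_prod_t_prev):
    # "predicted x_0", formula (12) of https://arxiv.org/pdf/2010.02502.pdf
    x0_hat = (sample - (1 - alpha_prod_t) ** 0.5 * eps_hat) / alpha_prod_t ** 0.5
    # deterministic direction to the next noise level, then the combined step
    return alpha_prod_t_prev ** 0.5 * x0_hat + (1 - alpha_prod_t_prev) ** 0.5 * eps_hat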
| 39
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Returns True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """
    >>> pytests()
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
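# Worked example (illustrative) of the prefix split that drives `insert`:
# a node storing "banana" matched against "bandana" shares "ban", keeps
# "ana" as its leftover prefix, and leaves "dana" of the word to insert.
assert RadixNode(prefix="banana").match("bandana") == ("ban", "ana", "dana")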
| 39
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
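# Quick demonstration (illustrative) of the invariant every insert/delete
# above must preserve: the tail's `next` pointer wraps back to the head,
# which is what makes a circular list rotate cheaply.
demo_list = CircularLinkedList()
for value in (1, 2, 3):
    demo_list.insert_tail(value)
assert demo_list.tail.next is demo_list.head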
| 39
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2  # indentation used when writing the json config files below
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format '<symbol0> <count0>' per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
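# Tiny worked example (inputs are illustrative): BPE continuation markers
# '@@' are stripped and word-final tokens gain '</w>', while the four special
# tokens keep their plain form.
# rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
# -> {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}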
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
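# Example invocation (script name and paths are illustrative placeholders):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/fairseq_dump \
#       --pytorch_dump_folder_path /path/to/hf_out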
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 39
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase_ :
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = '''gelu'''
def __init__( self : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]=13 , __UpperCAmelCase : Union[str, Any]=7 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : Dict=99 , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Optional[int]=37 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[Any]=20 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : str=1 , __UpperCAmelCase : str=0 , ) ->List[str]:
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = eos_token_id
a = pad_token_id
a = bos_token_id
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a = tf.concat([input_ids, eos_tensor] , axis=1 )
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a = prepare_blenderbot_small_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Checks that cached past key values reproduce the logits of a from-scratch forward pass."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
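# Worked example (illustrative) of the default mask construction above: with
# pad_token_id = 0 and input_ids = [[5, 6, 0]], tf.math.not_equal yields
# [[True, True, False]], cast to int8 as [[1, 1, 0]], so padded positions
# contribute nothing to attention.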
@require_tf
class lowercase_ ( lowercase , lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
a = TFBlenderbotSmallModelTester(self )
a = ConfigTester(self , config_class=__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
@require_tokenizers
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
__snake_case = '''facebook/blenderbot_small-90M'''
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
@slow
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.tokenizer(self.src_text , return_tensors='''tf''' )
a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , )
a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 0
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-character signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
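# Illustrative walk-through (assuming words.txt contains these entries):
# signature("pots") == signature("stop") == signature("tops") == "opst", so
# all three land in the same bucket and anagram("stop") returns the full trio.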
| 39
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
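# Reading the check above: `loss` is the mean negative log-likelihood per
# label token, so -(labels.shape[-1] * loss) reconstructs the total sequence
# log-likelihood reported by the reference implementation, compared here to
# within 1e-4.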
| 1
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
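# Sanity sketch (assumes the function above): multiplying the returned
# factors back together must recover the original number.
from math import prod

assert prod(prime_factors(360)) == 360  # 360 == 2*2*2*3*3*5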
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
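# Hypothetical usage sketch (model id and inputs are placeholders, not part
# of the module above):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")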
| 2
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Builds a random array of ten values and a random target sum."""
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: scan every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer sweep over a sorted copy of the array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
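# Fixed-input sanity check (illustrative): both approaches should agree; the
# two-pointer version simply reaches the answer in O(n^2) instead of the
# O(n^3) permutation scan.
assert triplet_sum2([1, 2, 3, 4, 5], 12) == (3, 4, 5)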
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 39
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=36 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Dict:
"""simple docstring"""
A : Optional[int] = parent
A : Optional[int] = batch_size
A : Union[str, Any] = seq_length
A : Union[str, Any] = is_training
A : Optional[Any] = use_input_mask
A : Tuple = use_token_type_ids
A : Optional[int] = use_labels
A : Tuple = vocab_size
A : Optional[Any] = embedding_size
A : str = hidden_size
A : Dict = num_hidden_layers
A : str = num_hidden_groups
A : Dict = num_attention_heads
A : List[Any] = intermediate_size
A : List[str] = hidden_act
A : Optional[Any] = hidden_dropout_prob
A : Any = attention_probs_dropout_prob
A : Any = max_position_embeddings
A : Any = type_vocab_size
A : Dict = type_sequence_label_size
A : Optional[int] = initializer_range
A : List[Any] = num_labels
A : int = num_choices
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : str = None
if self.use_input_mask:
A : int = random_attention_mask([self.batch_size, self.seq_length] )
A : Tuple = None
if self.use_token_type_ids:
A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Tuple = None
A : List[Any] = None
A : List[Any] = None
if self.use_labels:
A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Any = ids_tensor([self.batch_size] , self.num_choices )
A : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Any = AlbertModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )
A : str = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )
A : Optional[int] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : int = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , sentence_order_label=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Any = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Any = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.num_labels
A : str = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = self.num_labels
A : str = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = self.num_choices
A : List[str] = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : List[str] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
A : Dict = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
A : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
A : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Any = AlbertModelTester(self )
A : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A : str = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : int = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = AlbertModel.from_pretrained('''albert-base-v2''' )
A : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0]
A : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Optional[int] = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
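# Note on the integration test above: checking a fixed 3x3 slice of the last
# hidden state against hard-coded values (atol=1e-4) is a cheap regression
# guard; comparing the full (1, 11, 768) tensor would be brittle across
# hardware and kernel versions.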
| 3
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase ) == num_samples
def UpperCamelCase ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2383808.2) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=False , steps_offset=1 , )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
# shard inputs and rng
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2347693.5) ) < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
_UpperCAmelCase = replicate(UpperCAmelCase )
_UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
_UpperCAmelCase = shard(UpperCAmelCase )
_UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
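# The data-parallel pattern the tests above repeat (sketch; shapes and device
# counts are illustrative assumptions):
#   params = replicate(params)                      # copy the pytree to every device
#   prng_seed = jax.random.split(prng_seed, jax.device_count())
#   prompt_ids = shard(prompt_ids)                  # (N*B, ...) -> (N, B, ...)
# which is why the generated images carry a leading per-device batch axis of 1.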
| 39
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
__snake_case ={
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
__snake_case ={
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
__snake_case ={
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
__snake_case ={
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
__snake_case ={
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
__snake_case ={
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_0_2,
"""sigma_max""": 8_0.0,
}
def a_ ( lowerCamelCase : Tuple ):
    if isinstance(lowerCamelCase , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
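# Hedged usage sketch (not part of the script; the flag name is hypothetical):
# the helper above follows argparse's usual str-to-bool pattern, so it can be
# wired in as a `type=` callback:
#   parser.add_argument("--my_flag", type=a_, default=False)
#   a_("yes") -> True, a_("0") -> False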
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : Union[str, Any]=False ):
lowerCAmelCase = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
lowerCAmelCase = checkpoint[f'''{old_prefix}.skip_connection.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def a_ ( lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]=None ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
lowerCAmelCase = checkpoint[f'''{old_prefix}.norm.weight''']
lowerCAmelCase = checkpoint[f'''{old_prefix}.norm.bias''']
lowerCAmelCase = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
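# Minimal self-contained sketch (an illustration, not from the checkpoint code):
# the qkv split above relies on torch.chunk along dim 0, i.e. a fused
# (3*C, C, 1, 1) projection splits into three (C, C, 1, 1) pieces before the
# trailing singleton dims are squeezed away:
#   fused = torch.randn(3 * 8, 8, 1, 1)
#   q, k, v = fused.chunk(3, dim=0)  # each (8, 8, 1, 1); .squeeze(-1).squeeze(-1) -> (8, 8)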
def a_ ( lowerCamelCase : str , lowerCamelCase : List[str] ):
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
lowerCAmelCase = {}
lowerCAmelCase = checkpoint['time_embed.0.weight']
lowerCAmelCase = checkpoint['time_embed.0.bias']
lowerCAmelCase = checkpoint['time_embed.2.weight']
lowerCAmelCase = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase = checkpoint['label_emb.weight']
lowerCAmelCase = checkpoint['input_blocks.0.0.weight']
lowerCAmelCase = checkpoint['input_blocks.0.0.bias']
lowerCAmelCase = unet_config['down_block_types']
lowerCAmelCase = unet_config['layers_per_block']
lowerCAmelCase = unet_config['attention_head_dim']
lowerCAmelCase = unet_config['block_out_channels']
lowerCAmelCase = 1
lowerCAmelCase = channels_list[0]
for i, layer_type in enumerate(lowerCamelCase ):
lowerCAmelCase = channels_list[i]
lowerCAmelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCamelCase ):
lowerCAmelCase = f'''down_blocks.{i}.resnets.{j}'''
lowerCAmelCase = f'''input_blocks.{current_layer}.0'''
lowerCAmelCase = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCamelCase ):
lowerCAmelCase = f'''down_blocks.{i}.resnets.{j}'''
lowerCAmelCase = f'''input_blocks.{current_layer}.0'''
lowerCAmelCase = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
lowerCAmelCase = f'''down_blocks.{i}.attentions.{j}'''
lowerCAmelCase = f'''input_blocks.{current_layer}.1'''
lowerCAmelCase = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
current_layer += 1
if i != len(lowerCamelCase ) - 1:
lowerCAmelCase = f'''down_blocks.{i}.downsamplers.0'''
lowerCAmelCase = f'''input_blocks.{current_layer}.0'''
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
current_layer += 1
lowerCAmelCase = current_channels
# hardcoded the mid-block for now
lowerCAmelCase = 'mid_block.resnets.0'
lowerCAmelCase = 'middle_block.0'
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 'mid_block.attentions.0'
lowerCAmelCase = 'middle_block.1'
lowerCAmelCase = convert_attention(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 'mid_block.resnets.1'
lowerCAmelCase = 'middle_block.2'
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 0
lowerCAmelCase = unet_config['up_block_types']
for i, layer_type in enumerate(lowerCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase = f'''up_blocks.{i}.resnets.{j}'''
lowerCAmelCase = f'''output_blocks.{current_layer}.0'''
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
current_layer += 1
if i != len(lowerCamelCase ) - 1:
lowerCAmelCase = f'''up_blocks.{i}.upsamplers.0'''
lowerCAmelCase = f'''output_blocks.{current_layer-1}.1'''
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase = f'''up_blocks.{i}.resnets.{j}'''
lowerCAmelCase = f'''output_blocks.{current_layer}.0'''
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase )
lowerCAmelCase = f'''up_blocks.{i}.attentions.{j}'''
lowerCAmelCase = f'''output_blocks.{current_layer}.1'''
lowerCAmelCase = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
current_layer += 1
if i != len(lowerCamelCase ) - 1:
lowerCAmelCase = f'''up_blocks.{i}.upsamplers.0'''
lowerCAmelCase = f'''output_blocks.{current_layer-1}.2'''
lowerCAmelCase = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = checkpoint['out.0.weight']
lowerCAmelCase = checkpoint['out.0.bias']
lowerCAmelCase = checkpoint['out.2.weight']
lowerCAmelCase = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
__snake_case =parser.parse_args()
__snake_case =strabool(args.class_cond)
__snake_case =os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case =TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case =None
__snake_case =con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case =CMStochasticIterativeScheduler(**scheduler_config)
__snake_case =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
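    # Hedged follow-up (not part of this script): the saved pipeline can be
    # reloaded for one-step consistency sampling, roughly:
    #   pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    #   image = pipe(num_inference_steps=1).images[0]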
| 4
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
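    # Hedged illustration (not in the source test): the two asserts above pin down
    # ALBERT's special-token layout, `[CLS] text [SEP]` for a single sequence and
    # `[CLS] text [SEP] text_a [SEP]` for a pair.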
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39
| 0
|
UpperCAmelCase__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
UpperCAmelCase__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> list[int]:
"""simple docstring"""
_lowercase =True
_lowercase =[]
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__snake_case , __snake_case , __snake_case )
order.append(__snake_case )
return order
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> list[int]:
"""simple docstring"""
_lowercase =True
_lowercase =[vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__snake_case , __snake_case , __snake_case )
return component
def UpperCAmelCase_ ( __snake_case ) -> list[list[int]]:
"""simple docstring"""
_lowercase =len(__snake_case ) * [False]
_lowercase ={vert: [] for vert in range(len(__snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__snake_case )
_lowercase =[]
for i, was_visited in enumerate(__snake_case ):
if not was_visited:
order += topology_sort(__snake_case , __snake_case , __snake_case )
_lowercase =[]
_lowercase =len(__snake_case ) * [False]
for i in range(len(__snake_case ) ):
_lowercase =order[len(__snake_case ) - i - 1]
if not visited[vert]:
_lowercase =find_components(__snake_case , __snake_case , __snake_case )
components_list.append(__snake_case )
return components_list
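# Hedged self-contained sketch (not from the source): the two passes above
# implement Kosaraju's algorithm, a DFS finishing order over the graph followed
# by DFS over the reversed graph in reverse finishing order. Condensed version:
def _kosaraju_sketch(graph: dict) -> list:
    # build the reversed edge lists
    reversed_edges = {vert: [] for vert in graph}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_edges[neighbour].append(vert)

    seen, order = set(), []

    def dfs(edges, vert, out):
        seen.add(vert)
        for neighbour in edges[vert]:
            if neighbour not in seen:
                dfs(edges, neighbour, out)
        out.append(vert)  # record finishing order

    for vert in graph:
        if vert not in seen:
            dfs(graph, vert, order)

    seen.clear()
    components = []
    for vert in reversed(order):
        if vert not in seen:
            component = []
            dfs(reversed_edges, vert, component)
            components.append(component)
    return components


# e.g. _kosaraju_sketch({0: [1], 1: [2], 2: [0], 3: []}) -> [[3], [1, 2, 0]]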
| 5
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
            logger.warning(
                F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exist, so no preloaded
                speaker embeddings will be used. Make sure to provide a correct path to the JSON
                dictionary if wanted; otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, so no
                    preloaded voice preset will be used. Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
        if voice_preset is not None and not isinstance(UpperCAmelCase , dict ):
if (
                isinstance(UpperCAmelCase , str )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
                if isinstance(UpperCAmelCase , str ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
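    # Hedged usage sketch (not from the source; the checkpoint and preset names
    # are assumptions based on the public Bark release):
    #   processor = BarkProcessor.from_pretrained("suno/bark-small")
    #   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")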
| 39
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A : str = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
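    # Hedged aside (not in the source module): the _LazyModule indirection above
    # means `from ... import GPTNeoModel` only imports the heavy torch-backed
    # submodule on first attribute access, keeping the base import cheap.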
| 6
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "distilbert"
UpperCamelCase__ = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=512 , UpperCAmelCase=False , UpperCAmelCase=6 , UpperCAmelCase=12 , UpperCAmelCase=768 , UpperCAmelCase=4 * 768 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=0.02 , UpperCAmelCase=0.1 , UpperCAmelCase=0.2 , UpperCAmelCase=0 , **UpperCAmelCase , ):
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = sinusoidal_pos_embds
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dim
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation
_UpperCAmelCase = initializer_range
_UpperCAmelCase = qa_dropout
_UpperCAmelCase = seq_classif_dropout
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase )
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
@property
def UpperCamelCase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
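# Hedged usage sketch (not in the source; class name as in the upstream library):
# `attribute_map` above aliases the generic config names onto DistilBERT's own:
#   cfg = DistilBertConfig(n_heads=6)
#   cfg.num_attention_heads  # -> 6, resolved through attribute_map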
| 39
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
A__ = 384
A__ = 7
if "tiny" in model_name:
A__ = 96
A__ = (2, 2, 6, 2)
A__ = (3, 6, 12, 24)
elif "small" in model_name:
A__ = 96
A__ = (2, 2, 18, 2)
A__ = (3, 6, 12, 24)
elif "base" in model_name:
A__ = 128
A__ = (2, 2, 18, 2)
A__ = (4, 8, 16, 32)
A__ = 12
A__ = 512
elif "large" in model_name:
A__ = 192
A__ = (2, 2, 18, 2)
A__ = (6, 12, 24, 48)
A__ = 12
A__ = 768
# set label information
A__ = 150
A__ = 'huggingface/label-files'
A__ = 'ade20k-id2label.json'
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = SwinConfig(
embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
A__ = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , )
return config
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
A__ = val
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
'''simple docstring'''
A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:dim, :]
A__ = in_proj_bias[: dim]
A__ = in_proj_weight[
dim : dim * 2, :
]
A__ = in_proj_bias[
dim : dim * 2
]
A__ = in_proj_weight[
-dim :, :
]
A__ = in_proj_bias[-dim :]
# fmt: on
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ = x.shape
A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 )
A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
'''simple docstring'''
A__ , A__ = x.shape
A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 )
A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
'''simple docstring'''
A__ = x.shape[0]
A__ = x.reshape(4 , in_channel // 4 )
A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
'''simple docstring'''
A__ = x.shape[0]
A__ = x.reshape(in_channel // 4 , 4 )
A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
return x
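# Hedged self-contained round-trip check (an illustration, not from the script):
# the two 1-D helpers above permute groups of four channels with the
# [0, 2, 1, 3] pattern and undo it, so composing them is the identity.
def _reorder_fwd(x):
    n = x.shape[0]
    return x.reshape(4, n // 4)[[0, 2, 1, 3], :].transpose(0, 1).reshape(n)


def _reorder_inv(x):
    n = x.shape[0]
    return x.reshape(n // 4, 4)[:, [0, 2, 1, 3]].transpose(0, 1).reshape(n)


# torch.equal(_reorder_inv(_reorder_fwd(torch.arange(8.0))), torch.arange(8.0))  # True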
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
A__ = model_name_to_url[model_name]
A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[
'state_dict'
]
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE__ , param.shape )
A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ )
A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "bn" in key:
A__ = key.replace('bn' , 'batch_norm' )
A__ = val
# rename keys
A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ )
if "norm" in key:
A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify on image
A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' )
A__ = SegformerImageProcessor()
A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
A__ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
A__ = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
A__ = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
A__ = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
_a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {'source': 'What is love ?', 'target': 'life'}
_UpperCAmelCase = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(UpperCAmelCase , F"""{split}.{field}""" ) , 'w' ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase = "pytorch" ):
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'output' )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
_UpperCAmelCase = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
_UpperCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
_UpperCAmelCase = os.path.join(UpperCAmelCase , 'metrics.json' )
with open(UpperCAmelCase ) as f:
_UpperCAmelCase = json.load(UpperCAmelCase )
return result
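    # Hedged note (shape inferred from the asserts below, not from the source):
    # finetune_rag writes a metrics JSON of roughly the form
    #   {"test": [{"test_avg_em": <float>, ...}], ...}
    # which `_run_finetune` loads and returns.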
@require_torch_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 39
| 0
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ = _symbol_database.Default()
lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile(
    b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
lowerCAmelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase_ = None
lowerCAmelCase_ = b'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase_ = 45
lowerCAmelCase_ = 15_81
lowerCAmelCase_ = 15_17
lowerCAmelCase_ = 15_70
lowerCAmelCase_ = 15_84
lowerCAmelCase_ = 17_93
lowerCAmelCase_ = 17_95
lowerCAmelCase_ = 19_16
lowerCAmelCase_ = 18_64
lowerCAmelCase_ = 19_05
lowerCAmelCase_ = 19_19
lowerCAmelCase_ = 24_29
lowerCAmelCase_ = 22_08
lowerCAmelCase_ = 24_18
lowerCAmelCase_ = 23_23
lowerCAmelCase_ = 24_07
# @@protoc_insertion_point(module_scope)
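# Hedged usage sketch (not part of the generated module): the built message
# classes behave like any protobuf type, e.g.
#   proto = ModelProto()
#   proto.ParseFromString(open("spiece.model", "rb").read())
#   proto.trainer_spec.vocab_size  # fields follow the schema serialized above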
| 8
|
class __lowerCamelCase :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
_UpperCAmelCase = {} # Mapping from char to TrieNode
_UpperCAmelCase = False
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
for word in words:
self.insert(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
_UpperCAmelCase = TrieNode()
_UpperCAmelCase = curr.nodes[char]
_UpperCAmelCase = True
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
_UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
def _delete(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool:
if index == len(UpperCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_UpperCAmelCase = False
return len(curr.nodes ) == 0
_UpperCAmelCase = word[index]
_UpperCAmelCase = curr.nodes.get(UpperCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_UpperCAmelCase = _delete(UpperCAmelCase , UpperCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCAmelCase , 0 )
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
if node.is_leaf:
print(__lowerCAmelCase , end=' ' )
for key, value in node.nodes.items():
print_words(__lowerCAmelCase , word + key )
def __A ( )-> bool:
"""simple docstring"""
_UpperCAmelCase = 'banana bananas bandana band apple all beast'.split()
_UpperCAmelCase = TrieNode()
root.insert_many(__lowerCAmelCase )
# print_words(root, "")
assert all(root.find(__lowerCAmelCase ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> None:
"""simple docstring"""
print(str(__lowerCAmelCase ) , 'works!' if passes else 'doesn\'t work :(' )
def __A ( )-> None:
"""simple docstring"""
assert test_trie()
def __A ( )-> None:
"""simple docstring"""
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
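# Hedged aside (not in the source): insert/find/delete above each cost
# O(len(word)) child-dictionary hops, e.g. `root.find("band")` walks exactly
# four links from the root before checking `is_leaf`.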
| 39
| 0
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
# General docstring
__lowerCAmelCase : int ='PoolFormerConfig'
# Base docstring
__lowerCAmelCase : Tuple ='sail/poolformer_s12'
__lowerCAmelCase : str =[1, 5_1_2, 7, 7]
# Image classification docstring
__lowerCAmelCase : Any ='sail/poolformer_s12'
__lowerCAmelCase : Dict ='tabby, tabby cat'
__lowerCAmelCase : int =[
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _UpperCamelCase ( lowercase__ , lowercase__ = 0.0 , lowercase__ = False ):
if drop_prob == 0.0 or not training:
return input
__SCREAMING_SNAKE_CASE : Any = 1 - drop_prob
__SCREAMING_SNAKE_CASE : Tuple = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__SCREAMING_SNAKE_CASE : Union[str, Any] = keep_prob + torch.rand(lowercase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
__SCREAMING_SNAKE_CASE : Optional[Any] = input.div(lowercase__ ) * random_tensor
return output
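# Hedged illustration (an assumption, not from the modeling file): with
# drop_prob=0.5 in training mode roughly half the samples have their residual
# branch zeroed while survivors are rescaled by 1/keep_prob, preserving the
# expected value:
#   x = torch.ones(4, 3)
#   drop_path(x, drop_prob=0.5, training=True)  # each row becomes all 0.0 or all 2.0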
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Optional[float] = None ) -> None:
super().__init__()
__SCREAMING_SNAKE_CASE : int = drop_prob
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :torch.Tensor ) -> torch.Tensor:
return drop_path(lowerCAmelCase__ , self.drop_prob , self.training )
def __magic_name__( self :Any ) -> str:
return "p={}".format(self.drop_prob )
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict=None ) -> Any:
super().__init__()
__SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
__SCREAMING_SNAKE_CASE : Union[str, Any] = stride if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (stride, stride)
__SCREAMING_SNAKE_CASE : Any = padding if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (padding, padding)
__SCREAMING_SNAKE_CASE : Any = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = norm_layer(lowerCAmelCase__ ) if norm_layer else nn.Identity()
def __magic_name__( self :Any , lowerCAmelCase__ :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.norm(lowerCAmelCase__ )
return embeddings
class _lowercase ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :Optional[int] ) -> Any:
super().__init__(1 , lowerCAmelCase__ , **lowerCAmelCase__ )
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Dict ) -> str:
super().__init__()
__SCREAMING_SNAKE_CASE : List[str] = nn.AvgPoolad(lowerCAmelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=lowerCAmelCase__ )
def __magic_name__( self :Dict , lowerCAmelCase__ :Tuple ) -> str:
return self.pool(lowerCAmelCase__ ) - hidden_states
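    # Hedged note (an assumption, not a comment from the source): subtracting the
    # input turns average pooling into PoolFormer's token mixer, so this branch
    # contributes pool(x) - x on top of the residual stream that the enclosing
    # layer adds back.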
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
super().__init__()
__SCREAMING_SNAKE_CASE : List[str] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__SCREAMING_SNAKE_CASE : Dict = PoolFormerDropPath(lowerCAmelCase__ )
        if isinstance(config.hidden_act , str ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = config.hidden_act
def __magic_name__( self :Any , lowerCAmelCase__ :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = self.conva(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.act_fn(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.drop(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.conva(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.drop(lowerCAmelCase__ )
return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = 'poolformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
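# Usage sketch (added example, not part of the original module): a minimal
# inference pass through PoolFormerForImageClassification. It assumes the
# publicly released "sail/poolformer_s12" checkpoint; any PoolFormer
# checkpoint works the same way.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor

    image = Image.new("RGB", (224, 224))  # stand-in for a real input image
    image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
    model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])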
| 9
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template='Not formatting template',
            )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='pt',
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='pt',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='tf',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
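# Usage sketch (added example, outside the test harness): the same pipeline
# with a full-size NLI model. "facebook/bart-large-mnli" is assumed here; any
# NLI checkpoint with an entailment label works.
if __name__ == "__main__":
    classifier = pipeline('zero-shot-classification', model='facebook/bart-large-mnli')
    result = classifier(
        'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
    # labels come back sorted by descending score
    print(list(zip(result['labels'], result['scores'])))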
| 39
| 0
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root inside
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
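    # Optional cross-check against SciPy's reference bisection (an added
    # sketch; it assumes scipy is installed and uses the same bracket):
    from scipy.optimize import bisect

    assert abs(bisection(0, 6) - bisect(equation, 0, 6)) < 0.01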
| 10
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """`VerificationMode` can be one of the following modes."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error')
    logger.info('All the checksums matched successfully' + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('All the splits matched successfully.')
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check if the dataset size is small enough to be loaded in memory."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
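# Usage sketch for the helpers above (added example with a throwaway temp
# file): record a file's size and sha256, then verify the record round-trips.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b'hello datasets')
        path = tmp.name

    record = get_size_checksum_dict(path)
    # Verifying a freshly recorded checksum against itself must pass silently
    verify_checksums({path: record}, {path: record}, verification_name='demo')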
| 39
| 0
|
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
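    # A few spot checks (a small added sketch of the expected behaviour):
    assert is_palindrome(121)
    assert is_palindrome(0)
    assert not is_palindrome(10)
    assert not is_palindrome(-121)  # negatives are rejected by design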
| 11
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=['stage1', 'stage2', 'stage3'],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
    def test_backbone(self):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skip('Swin does not support feedforward chunking' )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
    def test_model_from_pretrained(self):
        pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f""" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"""
                            f""" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"""
                            f""" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."""
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
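# Standalone sketch (added example): exercising the backbone directly with a
# small random config, no pretrained weights; shapes mirror the tester
# defaults above.
if __name__ == "__main__" and is_torch_available():
    config = MaskFormerSwinConfig(
        image_size=32,
        patch_size=2,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        out_features=['stage1', 'stage2', 'stage3'],
    )
    backbone = MaskFormerSwinBackbone(config)
    backbone.eval()
    with torch.no_grad():
        feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps
    print([tuple(fm.shape) for fm in feature_maps])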
| 39
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
@require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3) , [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
    def test_small_model_tf(self):
pass
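# Usage sketch (added example): the audio-classification pipeline on a raw
# waveform, assuming the tiny public checkpoint already used in
# test_small_model_pt above.
if __name__ == "__main__":
    audio_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(audio_classifier(np.ones((8000,)), top_k=2))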
| 12
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<unk>',
            '[CLS]',
            '[SEP]',
            'want',
            'unwanted',
            'wa',
            'un',
            'running',
            ',',
            'low',
            'l',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  '), ['hello', '!', 'how', 'are', 'you', '?'])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how  \n Are yoU ?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(['new1', 'new2'])
        tokenizer.move_added_token('new1', 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1'), [1])
        self.assertEqual(tokenizer.decode([1]), 'new1')
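# Usage sketch (added example): the tokenizer outside the test suite, assuming
# the public "transfo-xl-wt103" checkpoint (downloads the vocabulary on first
# use).
if __name__ == "__main__":
    tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
    print(tokenizer.tokenize("Hello (bracket) and side-scrolled [and] Henry's $5,000"))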
| 39
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
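# Behaviour sketch (added example): with _LazyModule installed in sys.modules,
# attribute access triggers the real import, so the heavy torch/vision code is
# only loaded on first use.
if __name__ == "__main__":
    import importlib

    vivit = importlib.import_module('transformers.models.vivit')
    print(vivit.VivitConfig())  # resolved lazily through _LazyModule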
| 13
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
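# Usage sketch (added example) for the exports above: greedy generation with
# the public "facebook/opt-125m" checkpoint (assumed available).
if __name__ == "__main__":
    from transformers import AutoTokenizer, OPTForCausalLM

    tokenizer = AutoTokenizer.from_pretrained('facebook/opt-125m')
    model = OPTForCausalLM.from_pretrained('facebook/opt-125m')
    inputs = tokenizer('Hello, my dog is', return_tensors='pt')
    print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))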
| 39
| 0
|
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    """K-Means clustering using TensorFlow.

    NOTE: this uses the legacy TF1.x graph API (tf.Session, tf.placeholder,
    tf.assign); on TF2 run it via `import tensorflow.compat.v1 as tf` and
    `tf.disable_v2_behavior()`.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
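# For contrast, the same expectation-maximization loop fits in a few lines of
# plain NumPy. This is an illustrative sketch (the helper name and the fixed
# seed are ours, not part of the original script), not a drop-in replacement:
import numpy as np


def kmeans_numpy(vectors, k, iterations=100):
    vectors = np.asarray(vectors, dtype=float)  # shape (n, dim)
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid.
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments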
| 14
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
_a = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_a = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
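# Sanity check against the hard-coded tables above: a 12-layer teacher distilled
# into a 3-layer student copies teacher layers 0, 6 and 11, and those student
# layers are supervised by teacher layers 3, 7 and 11.
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]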
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from the teacher, and save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
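# Hypothetical programmatic usage, equivalent to the fire entry point above
# (the teacher checkpoint id and output directory are placeholders; run this
# separately, as it downloads the teacher model):
student, e_copied, d_copied = create_student_by_copying_alternating_layers(
    teacher="Helsinki-NLP/opus-mt-en-ro",  # any seq2seq checkpoint works
    save_path="student_dir",
    e=1,  # keep one encoder layer
    d=1,  # keep one decoder layer
)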
| 39
| 0
|
from math import ceil
def solution(n: int = 1_001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
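# Quick sanity check from the Project Euler statement: the diagonals of a
# 5x5 spiral are 1, 3, 5, 7, 9, 13, 17, 21 and 25, which sum to 101.
assert solution(5) == 101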
| 15
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into our ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
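# The converted checkpoints are also published on the Hub, so the same weights
# can be loaded without running the script; a quick check of the output shape
# (the checkpoint id is the officially published DINO ViT-B/16):
from transformers import ViTImageProcessor as _ViTImageProcessor, ViTModel as _ViTModel

_model = _ViTModel.from_pretrained("facebook/dino-vitb16", add_pooling_layer=False)
_processor = _ViTImageProcessor.from_pretrained("facebook/dino-vitb16")
_outputs = _model(**_processor(images=prepare_img(), return_tensors="pt"))
print(_outputs.last_hidden_state.shape)  # (1, 197, 768): 196 patches + 1 CLS token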
| 39
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
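# Hypothetical round-trip with the tokenizer above; the checkpoint id comes from
# PRETRAINED_VOCAB_FILES_MAP and downloads the sentencepiece model on first use:
tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok("Hello world")["input_ids"]
# build_inputs_with_special_tokens prepends </s>, so the first id is sep_token_id.
print(ids[0] == tok.sep_token_id, tok.decode(ids))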
| 16
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
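# Outside of tests, the decorator is used the same way: the wrapped function
# must take `batch_size` as its first argument and is retried with half the
# batch size whenever it raises a CUDA out-of-memory error. A minimal sketch:
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build the dataloader and run the training loop with `batch_size` here ...


train()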
| 39
| 0
|
"""simple docstring"""
from manim import *
class _lowerCAmelCase(Scene):
    """simple docstring"""

    def construct(self):
__lowercase = Rectangle(height=0.5, width=0.5 )
__lowercase = Rectangle(height=0.25, width=0.25 )
__lowercase = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("CPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("GPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Model", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
__lowercase = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=UpperCAmelCase__, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=UpperCAmelCase__, buff=0.0 )
self.add(UpperCAmelCase__ )
model_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Loaded Checkpoint", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = fill.copy().set_fill(UpperCAmelCase__, opacity=0.7 )
target.move_to(UpperCAmelCase__ )
ckpt_arr.append(UpperCAmelCase__ )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=1_8, )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=1_8, )
blue_text.next_to(UpperCAmelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase__ )
__lowercase = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=2_4, )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Disk", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ), Write(UpperCAmelCase__, run_time=1 ), Create(UpperCAmelCase__, run_time=1 ) )
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__, run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
__lowercase = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__, UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ ), )
self.wait()
| 17
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39
| 0
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
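# The extracted state dict can then seed a 6-layer DistilBERT student; a hedged
# sketch (the checkpoint path matches the default --dump_checkpoint above, and
# strict=False tolerates buffers the extraction does not cover):
from transformers import DistilBertConfig, DistilBertForMaskedLM

student = DistilBertForMaskedLM(DistilBertConfig())  # default config: 6 layers, dim 768
student.load_state_dict(torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth"), strict=False)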
| 18
|
def circle_sort(collection: list) -> list:
    """Sorts a list in ascending order in place using circle sort and returns it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
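# A couple of quick checks (the list is sorted in place and also returned):
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []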
| 39
| 0
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=4 , ) -> Optional[Any]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 19
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
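# Hypothetical usage, pairing the two sub-processors (the checkpoint id is an
# illustrative published Pix2Struct captioning model):
from transformers import Pix2StructProcessor
from PIL import Image
import requests

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(images=image, text="A picture of", return_tensors="pt")
# `inputs` holds the flattened image patches plus decoder_input_ids / decoder_attention_mask.
print(sorted(inputs.keys()))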
| 39
| 0
|